import zipfile
import shutil
import os

def unzip_copy_file(src_file, src_folder, dest_folder):
    """
    Extracts a file from a ZIP archive and copies it to another folder.
    :param src_file: path to the ZIP archive
    :param src_folder: path to the folder inside the archive containing the file to be copied
    :param dest_folder: path to the destination folder
    """
    with zipfile.ZipFile(src_file, 'r') as zip_ref:
        # Find the first file inside the requested folder of the archive
        members = [name for name in zip_ref.namelist()
                   if name.startswith(f'{src_folder}/') and not name.endswith('/')]
        if not members:
            raise FileNotFoundError(f'No file found under {src_folder!r} in {src_file}')
        member = members[0]
        # Extract just that file to a temporary folder next to the archive
        temp_folder = os.path.join(os.path.dirname(src_file), 'temp')
        zip_ref.extract(member, temp_folder)
        # Copy the file to the destination folder
        extracted_file = os.path.join(temp_folder, member)
        dest_file = os.path.join(dest_folder, os.path.basename(member))
        shutil.copy2(extracted_file, dest_file)
        # Remove the temporary folder
        shutil.rmtree(temp_folder)

if __name__ == '__main__':
    # Example usage
    zip_file = '/path/to/archive.zip'
    source_folder = 'folder1'
    dest_folder = '/path/to/destination/folder'
    unzip_copy_file(zip_file, source_folder, dest_folder)
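If the extraction might run concurrently or crash midway, a fixed `temp` folder can collide or leak. A minimal sketch of the same idea using `tempfile.TemporaryDirectory`, which always cleans up after itself (the `unzip_copy_file_safe` name is just a hypothetical variant):

```python
import zipfile
import shutil
import os
import tempfile

def unzip_copy_file_safe(src_file, src_folder, dest_folder):
    """Same behavior as unzip_copy_file, but the temp dir is always removed."""
    with zipfile.ZipFile(src_file, 'r') as zip_ref, \
            tempfile.TemporaryDirectory() as temp_folder:
        members = [name for name in zip_ref.namelist()
                   if name.startswith(f'{src_folder}/') and not name.endswith('/')]
        if not members:
            raise FileNotFoundError(f'No file found under {src_folder!r} in {src_file}')
        member = members[0]
        zip_ref.extract(member, temp_folder)
        # The temporary directory is deleted when the with-block exits
        shutil.copy2(os.path.join(temp_folder, member),
                     os.path.join(dest_folder, os.path.basename(member)))
```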
import pdfminer.high_level
import pdfminer.layout

def pdf_to_markdown(pdf_path, markdown_path):
    lines = []
    # extract_pages yields one layout object per page
    laparams = pdfminer.layout.LAParams()
    for page_layout in pdfminer.high_level.extract_pages(pdf_path, laparams=laparams):
        for element in page_layout:
            # Only text containers carry extractable text
            if isinstance(element, pdfminer.layout.LTTextContainer):
                lines.append(element.get_text().strip())
        lines.append('')
    with open(markdown_path, 'w') as markdown_file:
        markdown_file.write('\n'.join(lines))

pdf_to_markdown('example.pdf', 'example.md')
import subprocess
import os

input_file = 'example.docx'
output_file = 'example.md'
# Convert Word document to Markdown using pandoc; check=True raises if pandoc fails
subprocess.run(['pandoc', input_file, '-f', 'docx', '-t', 'markdown', '-o', output_file], check=True)
# Print output file contents
with open(output_file, 'r') as f:
    print(f.read())
# Optional: Delete output file
os.remove(output_file)
import pypandoc

def convert_word_to_markdown(input_path, output_path):
    # Specify the input and output formats
    input_format = 'docx'
    output_format = 'markdown_github'  # GitHub-flavored Markdown ('gfm' on newer pandoc)
    # Use Pandoc to convert the file
    try:
        pypandoc.convert_file(input_path, output_format, outputfile=output_path, format=input_format)
        print(f'Successfully converted {input_path} to {output_path}')
    except (OSError, RuntimeError) as error:
        # OSError: pandoc is not installed; RuntimeError: pandoc reported a conversion error
        print(f'Error converting {input_path} to {output_path}: {error}')
import pandas as pd

def combine_csv(file1, file2, columns):
    """
    Reads and combines two CSV files and allows the user to select which columns to include.
    """
    # Read in the two CSV files as dataframes
    df1 = pd.read_csv(file1)
    df2 = pd.read_csv(file2)
    # Merge the two dataframes on a common key column ('key_column' must exist in both files)
    merged_df = pd.merge(df1, df2, on='key_column', how='outer')
    # Select the desired columns and create a new dataframe with only those columns
    selected_df = merged_df[columns]
    # Return the selected dataframe
    return selected_df

combined_df = combine_csv('file1.csv', 'file2.csv', ['column1', 'column2', 'column3'])
import pandas as pd

def convert_spreadsheet_to_markdown(spreadsheet_file):
    """
    Reads in a spreadsheet file and converts it into a Markdown table.
    """
    # Read in the spreadsheet file as a dataframe
    df = pd.read_excel(spreadsheet_file)
    # Convert the dataframe to a Markdown table (requires the 'tabulate' package)
    markdown_table = df.to_markdown(index=False)
    # Print the Markdown table
    print(markdown_table)
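A quick usage sketch; the workbook name is a placeholder:

```python
# Prints the first sheet of the workbook as a Markdown table
convert_spreadsheet_to_markdown('inventory.xlsx')
```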
# Troubleshooting Guide
## Problem
[What is the problem you're experiencing? Be as specific as possible.]
## Steps to Reproduce
[Provide step-by-step instructions for reproducing the problem.]
## Expected Results
[What should happen when these steps are followed?]
## Actual Results
[What is actually happening when you follow these steps? Be as specific as possible.]
## Error Messages
[Include any error messages or warnings that are displayed.]
## Troubleshooting Steps
[Include any troubleshooting steps that you've already tried.]
## Possible Solutions
[If you have any ideas for possible solutions, list them here.]
## Resources
[Include any relevant resources, such as documentation or support forums.]
# Tutorial Title
## Introduction
[Provide an introduction to the tutorial and explain what the reader will learn.]
## Prerequisites
[Provide a list of any prerequisites or required background knowledge.]
## Steps
[Provide a list of steps that the reader should follow in order to complete the tutorial.]
### Step 1: [Step Title]
[Provide a description of the first step, including any code or commands that the reader needs to run.]
### Step 2: [Step Title]
[Provide a description of the second step, including any code or commands that the reader needs to run.]
### Step 3: [Step Title]
[Provide a description of the third step, including any code or commands that the reader needs to run.]
### Step 4: [Step Title]
[Provide a description of the fourth step, including any code or commands that the reader needs to run.]
## Conclusion
[Provide a conclusion to the tutorial, summarizing what the reader has learned.]
## Resources
[Provide a list of any resources that the reader may find helpful, such as additional reading or documentation.]
# Bullet Journal Title
## Monthly Spread
### Goals
- [ ] Goal 1
- [ ] Goal 2
- [ ] Goal 3
### Tasks
- [ ] Task 1
- [ ] Task 2
- [ ] Task 3
### Events
- [ ] Event 1
- [ ] Event 2
- [ ] Event 3
## Weekly Spread
### Week of [Date]
- [ ] Task 1
- [ ] Task 2
- [ ] Task 3
### Week of [Date]
- [ ] Task 1
- [ ] Task 2
- [ ] Task 3
## Daily Spread
### [Date]
- [ ] Task 1
- [ ] Task 2
- [ ] Task 3
### [Date]
- [ ] Task 1
- [ ] Task 2
- [ ] Task 3
## Notes
[Provide a section for general notes or reflections on the day, week, or month.]
# Bullet Journal Notebook
## Index
- [Month 1](#month-1)
- [Month 2](#month-2)
- [Month 3](#month-3)
## Future Log
[Provide a section for future events, goals, and tasks.]
## Month 1
[Include the monthly spread for Month 1, with goals, tasks, events, weekly spreads, and daily spreads.]
## Month 2
[Include the monthly spread for Month 2, with goals, tasks, events, weekly spreads, and daily spreads.]
## Month 3
[Include the monthly spread for Month 3, with goals, tasks, events, weekly spreads, and daily spreads.]
## Collections
[Provide a section for collections of notes or lists that are not associated with specific dates. Examples include books to read, movies to watch, or travel destinations.]
#!/bin/bash
# Create a new folder for the bullet journal
echo "Creating new folder for bullet journal..."
mkdir -p Bullet-Journal
# Move into the new folder (abort if that fails)
cd Bullet-Journal || exit 1
# Create an index file for the bullet journal
echo "# Bullet Journal Notebook" > index.md
# Create a future log file for the bullet journal
echo "# Future Log" > future-log.md
# Create monthly files for the bullet journal
echo "# Month 1" > month-1.md
echo "# Month 2" > month-2.md
echo "# Month 3" > month-3.md
# Create a collections file for the bullet journal
echo "# Collections" > collections.md
echo "Bullet journal created!"
import os

# Create a new folder for the bullet journal
print("Creating new folder for bullet journal...")
os.makedirs("Bullet-Journal", exist_ok=True)
# Move into the new folder
os.chdir("Bullet-Journal")
# Create an index file for the bullet journal
with open("index.md", "w") as f:
    f.write("# Bullet Journal Notebook")
# Create a future log file for the bullet journal
with open("future-log.md", "w") as f:
    f.write("# Future Log")
# Create monthly files for the bullet journal
with open("month-1.md", "w") as f:
    f.write("# Month 1")
with open("month-2.md", "w") as f:
    f.write("# Month 2")
with open("month-3.md", "w") as f:
    f.write("# Month 3")
# Create a collections file for the bullet journal
with open("collections.md", "w") as f:
    f.write("# Collections")
print("Bullet journal created!")
# Date: [insert date here]
## Tasks
- [ ] Task 1
- [ ] Task 2
- [ ] Task 3
## Events
- [ ] Event 1
- [ ] Event 2
- [ ] Event 3
## Notes
- Note 1
- Note 2
- Note 3
Bullet-Journal/
├── index.md
├── future-log.md
├── month-1.md
├── month-2.md
├── month-3.md
├── collections.md
└── daily/
├── 2023-02-04.md
├── 2023-02-05.md
├── 2023-02-06.md
└── ...
def load_barbell(weight, bar_weight=45):
    """
    Load a barbell to a given weight.
    Args:
    - weight (float): the weight to load in pounds
    - bar_weight (float): the weight of the barbell in pounds (default 45)
    Returns:
    - tuple of floats: the weights to load on each side of the barbell in pounds
    """
    # Subtract the weight of the barbell and divide by 2 to get the weight on each side
    plate_weight = (weight - bar_weight) / 2
    # Available plate sizes, largest first
    plate_sizes = [45, 35, 25, 10, 5, 2.5]
    # Initialize the list of plate weights
    plates = []
    # Load the barbell with the largest plates first, reusing a size as long as it fits
    for size in plate_sizes:
        while plate_weight >= size:
            plates.append(size)
            plate_weight -= size
    # Return the weights on each side of the barbell
    return tuple(plates)
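A quick sanity check of the greedy loading; the target weights are arbitrary examples:

```python
# Each tuple lists the plates for ONE side of the bar
print(load_barbell(135))  # (45,)
print(load_barbell(225))  # (45, 45)
print(load_barbell(190))  # (45, 25, 2.5)
```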
function loadBarbell(weight, barWeight = 45) {
  /*
  Load a barbell to a given weight.
  Args:
  - weight (number): the weight to load in pounds
  - barWeight (number): the weight of the barbell in pounds (default 45)
  Returns:
  - an array of numbers: the weights to load on each side of the barbell in pounds
  */
  // Subtract the weight of the barbell and divide by 2 to get the weight on each side
  let plateWeight = (weight - barWeight) / 2;
  // Available plate sizes, largest first
  const plateSizes = [45, 35, 25, 10, 5, 2.5];
  // Initialize the array of plate weights
  const plates = [];
  // Load the barbell with the largest plates first, reusing a size as long as it fits
  for (const size of plateSizes) {
    while (plateWeight >= size) {
      plates.push(size);
      plateWeight -= size;
    }
  }
  // Return the weights on each side of the barbell
  return plates;
}
function Load-Barbell {
    [CmdletBinding()]
    param(
        [Parameter(Mandatory=$true)]
        [double]$Weight,
        [Parameter()]
        [double]$BarWeight = 45
    )
    # Subtract the weight of the barbell and divide by 2 to get the weight on each side
    $PlateWeight = ($Weight - $BarWeight) / 2
    # Available plate sizes, largest first
    $PlateSizes = @(45, 35, 25, 10, 5, 2.5)
    # Initialize the array of plate weights
    $Plates = @()
    # Load the barbell with the largest plates first, reusing a size as long as it fits
    foreach ($Size in $PlateSizes) {
        while ($PlateWeight -ge $Size) {
            $Plates += $Size
            $PlateWeight -= $Size
        }
    }
    # Return the weights on each side of the barbell
    return $Plates
}
$NotebookName = Read-Host "Enter a name for your Bullet Journal notebook"
New-Item -ItemType Directory -Path "$($NotebookName)"
# Create folders for different collections
New-Item -ItemType Directory -Path "$($NotebookName)\Future Log"
New-Item -ItemType Directory -Path "$($NotebookName)\Monthly Log"
New-Item -ItemType Directory -Path "$($NotebookName)\Daily Log"
New-Item -ItemType Directory -Path "$($NotebookName)\Collections"
# Create sample files for each folder
$SampleMarkdown = @"
# Sample Markdown File
This is a sample file for the $($NotebookName) Bullet Journal notebook.
"@
$SampleMarkdown | Out-File "$($NotebookName)\Future Log\FutureLog.md"
$SampleMarkdown | Out-File "$($NotebookName)\Monthly Log\MonthlyLog.md"
$SampleMarkdown | Out-File "$($NotebookName)\Daily Log\DailyLog.md"
$SampleMarkdown | Out-File "$($NotebookName)\Collections\Collections.md"
Write-Host "Your $($NotebookName) Bullet Journal notebook has been created."
import shutil

def compress_folder(folder_path, output_file_path):
    """
    Compresses a folder while retaining its folder layout.
    :param folder_path: The path to the folder to compress.
    :param output_file_path: The path and filename for the output archive,
        without the extension ('.zip' is appended by make_archive).
    """
    shutil.make_archive(output_file_path, 'zip', folder_path)

def uncompress_folder(compressed_file_path, output_folder_path):
    """
    Uncompresses a compressed file while retaining its folder layout.
    :param compressed_file_path: The path to the compressed file.
    :param output_folder_path: The path to the output folder.
    """
    shutil.unpack_archive(compressed_file_path, output_folder_path)
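A usage sketch; the paths are placeholders, and note that `make_archive` appends the `.zip` extension itself:

```python
compress_folder('project/docs', 'backup')    # creates backup.zip
uncompress_folder('backup.zip', 'restored')  # extracts into restored/
```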
sudo apt-get install zip unzip
#!/bin/bash
compress_folder() {
    # Compress a folder while retaining its folder layout.
    # $1: path to the folder to compress
    # $2: path and filename (without .zip) for the output archive; use an absolute path
    # Run in a subshell so the caller's working directory is unchanged
    (cd "$(dirname "$1")" && zip -r -q "$2.zip" "$(basename "$1")")
}
uncompress_folder() {
    # Uncompress a compressed file while retaining its folder layout.
    # $1: path to the compressed file
    # $2: path to the output folder (created if missing)
    mkdir -p "$2"
    unzip -q "$1" -d "$2"
}
# Example usage
compress_folder "/path/to/folder" "/path/to/output_file"
uncompress_folder "/path/to/compressed_file.zip" "/path/to/output_folder"
graph TD
A[User reports issue] --> B(Troubleshooting)
B --> C{Identify problem}
C -->|Hardware| D[Replace faulty part]
C -->|Software| E[Update software]
C -->|Settings| F[Adjust settings]
import re

def count_matches(log_files, good_matches, bad_matches):
    good_count = 0
    bad_count = 0
    for log_file in log_files:
        with open(log_file, 'r') as f:
            log_content = f.read()
        for good_match in good_matches:
            good_count += len(re.findall(good_match, log_content))
        for bad_match in bad_matches:
            bad_count += len(re.findall(bad_match, log_content))
    return good_count, bad_count

log_files = ['file1.log', 'file2.log', 'file3.log']
good_matches = ['successful', 'completed']
bad_matches = ['error', 'failed']
good_count, bad_count = count_matches(log_files, good_matches, bad_matches)
print(f"Found {good_count} good matches and {bad_count} bad matches in the log files.")
function countMatches(logFiles, goodRegex, badRegex) {
  // logFiles: array of log file CONTENTS (read the files first, e.g. with fs.readFileSync)
  let goodCount = 0;
  let badCount = 0;
  for (const logFile of logFiles) {
    for (const pattern of goodRegex) {
      const matches = logFile.match(new RegExp(pattern, 'g'));
      if (matches) {
        goodCount += matches.length;
      }
    }
    for (const pattern of badRegex) {
      const matches = logFile.match(new RegExp(pattern, 'g'));
      if (matches) {
        badCount += matches.length;
      }
    }
  }
  return { goodCount, badCount };
}
function Get-MatchCounts {
    param (
        [Parameter(Mandatory=$true)]
        [string[]]$LogFiles,
        [Parameter(Mandatory=$true)]
        [string[]]$GoodMatches,
        [Parameter(Mandatory=$true)]
        [string[]]$BadMatches
    )
    $goodCount = 0
    $badCount = 0
    foreach ($file in $LogFiles) {
        if (Test-Path $file) {
            $content = Get-Content $file -Raw
            foreach ($pattern in $GoodMatches) {
                $goodCount += [regex]::Matches($content, $pattern).Count
            }
            foreach ($pattern in $BadMatches) {
                $badCount += [regex]::Matches($content, $pattern).Count
            }
        }
    }
    return @{
        GoodCount = $goodCount
        BadCount = $badCount
    }
}
import re

def search_logs(log_files, good_matches, bad_matches):
    good_count = 0
    bad_count = 0
    # Open the output files once instead of re-opening them for every match
    with open('good.md', 'a') as good_file, open('bad.md', 'a') as bad_file:
        for file in log_files:
            with open(file) as f:
                for line in f:
                    # Count and record each line at most once per category
                    if any(re.search(m, line) for m in good_matches):
                        good_count += 1
                        good_file.write(line)
                    if any(re.search(m, line) for m in bad_matches):
                        bad_count += 1
                        bad_file.write(line)
    return good_count, bad_count
import re

def remove_lines(files, regex):
    count = 0
    for file in files:
        with open(file, 'r') as f:
            lines = f.readlines()
        with open(file, 'w') as f:
            for line in lines:
                if not re.search(regex, line):
                    f.write(line)
                else:
                    count += 1
    return count
import re

def remove_lines_from_files(files, regexes):
    """Remove lines that match any of the regexes from the files.
    Writes the filtered content to '<file>.new' alongside each input file.
    Parameters:
    files (list): A list of file paths to process.
    regexes (list): A list of regular expressions to match lines to remove.
    Returns:
    int: The total number of lines removed across all files.
    """
    total_removed = 0
    for file in files:
        with open(file, 'r') as f_in, open(f'{file}.new', 'w') as f_out:
            lines_removed = 0
            for line in f_in:
                if any(re.search(regex, line) for regex in regexes):
                    lines_removed += 1
                else:
                    f_out.write(line)
            total_removed += lines_removed
    return total_removed
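The function leaves the originals untouched and writes `<file>.new` siblings; a usage sketch (file names and patterns are placeholders) that swaps the filtered copies into place afterwards:

```python
import os

files = ['app.log', 'db.log']
removed = remove_lines_from_files(files, [r'DEBUG', r'heartbeat'])
print(f'Removed {removed} lines')
# Atomically replace each original with its filtered copy
for file in files:
    os.replace(f'{file}.new', file)
```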
import re
import os

def replace_regex_matches(filenames, regex_matches, replace_with):
    """Replace regex matches with specified characters in files.
    Args:
    filenames (list): A list of file names.
    regex_matches (list): A list of regex patterns to match.
    replace_with (str): The string to replace the matched patterns.
    Returns:
    int: The total number of replacements made.
    """
    total_replacements = 0
    for filename in filenames:
        with open(filename, 'r') as f:
            content = f.read()
        for regex in regex_matches:
            # re.subn returns the new text and an accurate replacement count
            content, count = re.subn(regex, replace_with, content)
            total_replacements += count
        # Add "_replaced" to the file name, keeping the original extension
        base, ext = os.path.splitext(filename)
        with open(f'{base}_replaced{ext}', 'w') as f:
            f.write(content)
    return total_replacements

filenames = ['file1.txt', 'file2.txt']
regex_matches = [r'\d+', r'[aeiou]+']
replace_with = 'X'
total_replacements = replace_regex_matches(filenames, regex_matches, replace_with)
print(f"Total replacements made: {total_replacements}")
import csv
import json

def convert_to_csv(log_file_path, csv_file_path):
    with open(log_file_path) as log_file, open(csv_file_path, mode='w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerow(['timestamp', 'level', 'message'])
        for line in log_file:
            try:
                log_line = json.loads(line)
                writer.writerow([log_line['timestamp'], log_line['level'], log_line['message']])
            except (json.JSONDecodeError, KeyError):
                # Skip lines that are not JSON or are missing the expected fields
                continue
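A usage sketch, assuming the log holds one JSON object per line with `timestamp`, `level`, and `message` keys; the file names are placeholders:

```python
# app.log contains lines like:
# {"timestamp": "2023-02-04T12:00:00Z", "level": "INFO", "message": "started"}
convert_to_csv('app.log', 'app.csv')
```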
import re
import pandas as pd

def find_and_count_matches(files, regex_matches):
    # Initialize a dictionary to store the match counts for each file
    match_counts = {}
    # Iterate over each file in the list of files
    for file in files:
        # Initialize a dictionary to store the match counts for this file
        file_match_counts = {}
        # Read the contents of the file
        with open(file, 'r') as f:
            contents = f.read()
        # Iterate over each regex match
        for regex_match in regex_matches:
            # Count the number of matches in the file
            file_match_counts[regex_match] = len(re.findall(regex_match, contents))
        # Add the file's match counts to the overall match counts dictionary
        match_counts[file] = file_match_counts
    # Create a pandas DataFrame from the match counts dictionary
    df = pd.DataFrame.from_dict(match_counts, orient='index')
    # Fill in any missing values with zeros
    df.fillna(0, inplace=True)
    # Return the DataFrame
    return df

files = ['file1.txt', 'file2.txt', 'file3.txt']
regex_matches = [r'pattern1', r'pattern2', r'pattern3']
match_counts_df = find_and_count_matches(files, regex_matches)
print(match_counts_df)
match_counts_df.to_csv('match_counts.csv')
#!/bin/bash
# Define variables
HOST="example.com"
ENDPOINT="/api/users"
EXPECTED_RESPONSE_CODE=200
EXPECTED_RESPONSE_BODY="Hello, World!"
# Make the API request; -w appends the status code on its own line after the body
RESPONSE=$(curl -s -w '\n%{http_code}' "https://${HOST}${ENDPOINT}")
# Extract the response code (last line) and response body (everything else)
RESPONSE_CODE=$(echo "$RESPONSE" | tail -n 1)
RESPONSE_BODY=$(echo "$RESPONSE" | sed '$d')
# Check if the response code is what we expect
if [ "$RESPONSE_CODE" -ne "$EXPECTED_RESPONSE_CODE" ]; then
    echo "Error: Unexpected response code. Expected $EXPECTED_RESPONSE_CODE but got $RESPONSE_CODE"
    exit 1
fi
# Check if the response body contains the expected text
if [[ ! "$RESPONSE_BODY" =~ $EXPECTED_RESPONSE_BODY ]]; then
    echo "Error: Response body does not contain expected text: $EXPECTED_RESPONSE_BODY"
    exit 1
fi
echo "API test passed successfully!"
exit 0
import requests
import re

def test_api(host, endpoint, expected_status_code, expected_text_match):
    # Construct the URL
    url = f"{host}/{endpoint}"
    # Send a GET request to the API endpoint
    response = requests.get(url)
    # Check the response status code
    assert response.status_code == expected_status_code, f"Expected status code {expected_status_code}, but got {response.status_code}"
    # Check the response text for the expected match
    regex_match = re.search(expected_text_match, response.text)
    assert regex_match is not None, f"Expected text match '{expected_text_match}' not found in response text"
    return True

test_api("https://jsonplaceholder.typicode.com", "todos/1", 200, r'"title": "delectus aut autem"')
#!/bin/bash
# Define list of services and processes to check
SERVICES=("nginx" "mysql")
PROCESSES=("python" "node")
# Check if services are running
for service in "${SERVICES[@]}"; do
    if systemctl is-active --quiet "$service"; then
        echo "$service is running."
    else
        echo "$service is not running."
    fi
done
# Check if processes are running
for process in "${PROCESSES[@]}"; do
    if pgrep -x "$process" > /dev/null; then
        echo "$process is running."
    else
        echo "$process is not running."
    fi
done
# Define list of API endpoints to curl
ENDPOINTS=("https://api.example.com/users" "https://api.example.com/products")
# Check endpoints for status code and content type
for endpoint in "${ENDPOINTS[@]}"; do
    # %{content_type} is curl's variable for the response Content-Type header
    response=$(curl -s -o /dev/null -w "%{http_code} %{content_type}" "$endpoint")
    status_code=$(echo "$response" | awk '{print $1}')
    content_type=$(echo "$response" | awk '{print $2}')
    if [[ $status_code -eq 200 && $content_type =~ "application/json" ]]; then
        echo "$endpoint is available."
    else
        echo "$endpoint is not available."
    fi
done
#!/bin/bash
# Write the headers for the CSV file
echo "File,Process,PID,Port,Protocol" > listening_files.csv
# Loop through the kernel TCP tables (IPv4 and IPv6)
for table in /proc/net/tcp /proc/net/tcp6; do
    protocol=$(basename "$table")
    # Skip the header line; socket state 0A means LISTEN
    tail -n +2 "$table" | while read -r _ local _ state _ _ _ _ _ inode _; do
        [ "$state" = "0A" ] || continue
        # The local address is hex-ip:hex-port; convert the port to decimal
        port_hex=${local##*:}
        port_dec=$((16#$port_hex))
        # Find the process holding this socket inode via its /proc/<pid>/fd links
        fd_path=$(find /proc/[0-9]*/fd -lname "socket:\[$inode\]" -print -quit 2>/dev/null)
        pid=$(echo "$fd_path" | cut -d'/' -f3)
        [ -n "$pid" ] || continue
        path=$(readlink -f "/proc/$pid/exe" 2>/dev/null)
        # Add the data to the CSV file
        echo "$path,$(basename "$path"),$pid,$port_dec,$protocol" >> listening_files.csv
    done
done
#!/bin/bash
echo "Port | Process"
echo "-----|--------"
# -sTCP:LISTEN limits lsof to listening TCP sockets; $9 is host:port, $1 the command
lsof -nP -iTCP -sTCP:LISTEN | awk 'NR > 1 {n = split($9, a, ":"); print a[n] " | " $1}' | sort -n | uniq
#!/bin/bash
# Look for any running processes containing "api" in their name or command line
pids=$(pgrep -f api)
# Loop through the PIDs and get the associated listening ports and files
for pid in $pids; do
    echo "API process with PID $pid is listening on the following ports:"
    lsof -p "$pid" -i -n -P | grep LISTEN
    echo ""
done
import os
import re
import csv

def search_files_regex(directory, extension, regex, output_file):
    # Open the output file in append mode
    with open(output_file, 'a', newline='') as csvfile:
        writer = csv.writer(csvfile)
        # Find all files with the given extension in the given directory and its subdirectories
        for root, dirs, files in os.walk(directory):
            for file in files:
                if file.endswith(extension):
                    file_path = os.path.join(root, file)
                    # Search for the regex match in the file
                    with open(file_path, 'r') as f:
                        for line in f:
                            matches = re.findall(regex, line)
                            if matches:
                                # Write the filename and first match to the output file
                                writer.writerow([file_path, matches[0]])
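A usage sketch; the directory, extension, pattern, and output name are placeholders:

```python
# Collect every IPv4-looking string from .log files under /var/log into matches.csv
search_files_regex('/var/log', '.log', r'\d{1,3}(?:\.\d{1,3}){3}', 'matches.csv')
```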
import zipfile
import os
import shutil

# Set the path to the zip file and the file to search for
zip_file_path = 'path/to/zip/file.zip'
file_to_search = 'file_to_search.txt'
# Set the path to the folder to copy the file to
destination_folder = 'path/to/destination/folder'

# Open the zip file and search for the file
with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
    for file in zip_file.namelist():
        if file.endswith(file_to_search):
            # Extract the file from the zip file to a temporary location
            zip_file.extract(file, 'temp')
            # Copy the file from the temporary location to the destination folder
            shutil.copy(os.path.join('temp', file), destination_folder)
            # Remove the temporary location
            shutil.rmtree('temp')
            break
import zipfile
import os
import shutil
import re

# Set the path to the zip file and the pattern to search for
zip_file_path = 'path/to/zip/file.zip'
pattern = r'.*\.txt'  # Matches any file ending in .txt
# Set the path to the folder to copy the files to
destination_folder = 'path/to/destination/folder'

# Open the zip file and search for the files that match the pattern
with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
    for file in zip_file.namelist():
        if re.match(pattern, file):
            # Extract the file from the zip file to a temporary location
            zip_file.extract(file, 'temp')
            # Copy the file from the temporary location to the destination folder
            shutil.copy(os.path.join('temp', file), destination_folder)
            # Remove the temporary location
            shutil.rmtree('temp')
import zipfile
import os
import shutil
import re

def extract_files_from_zip(zip_file_path, pattern, destination_folder):
    """
    Extracts files from a zip file that match a regular expression pattern and copies them to a destination folder.
    Args:
    zip_file_path (str): Path to the zip file.
    pattern (str): Regular expression pattern to match files.
    destination_folder (str): Path to the folder where the files will be copied.
    Returns:
    None
    """
    # Open the zip file and search for the files that match the pattern
    with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
        for file in zip_file.namelist():
            if re.match(pattern, file):
                # Extract the file from the zip file to a temporary location
                zip_file.extract(file, 'temp')
                # Copy the file from the temporary location to the destination folder
                shutil.copy(os.path.join('temp', file), destination_folder)
                # Remove the temporary location
                shutil.rmtree('temp')
import zipfile
import io
import re

def search_zip_file(zip_file_path, regex_list):
    """
    Searches for files inside a zip file whose content matches a list of regular expression patterns.
    Args:
    zip_file_path (str): Path to the zip file.
    regex_list (list): List of regular expression patterns.
    Returns:
    A list of filenames of files whose content matches all of the regular expression patterns.
    """
    matching_files = []
    with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
        for file_name in zip_file.namelist():
            if file_name.endswith('/'):
                continue  # Skip directory entries
            with zip_file.open(file_name, 'r') as file:
                content = io.TextIOWrapper(file).read()
                if all(re.search(regex, content) for regex in regex_list):
                    matching_files.append(file_name)
    return matching_files
To use the above, save it as `search_zip.py` and import it:
import search_zip
zip_file_path = 'path/to/zip/file.zip'
regex_list = ['hello', 'world']
matching_files = search_zip.search_zip_file(zip_file_path, regex_list)
print(matching_files)
import zipfile
import os
import shutil

def copy_files_by_extension(zip_file_path, extension, output_folder):
    """
    Copies files from a zip file to a specified folder that match a given file extension.
    Args:
    zip_file_path (str): Path to the zip file.
    extension (str): File extension to match (e.g. '.txt', '.pdf').
    output_folder (str): Path to the folder where the matching files will be copied.
    Returns:
    None
    """
    with zipfile.ZipFile(zip_file_path, 'r') as zip_file:
        for file in zip_file.namelist():
            if os.path.splitext(file)[1] == extension:
                # Extract the file from the zip file to a temporary location
                zip_file.extract(file, 'temp')
                # Copy the file from the temporary location to the output folder
                shutil.copy(os.path.join('temp', file), output_folder)
                # Remove the temporary location
                shutil.rmtree('temp')
import zip_file_utils
zip_file_path = 'path/to/zip/file.zip'
extension = '.txt'
output_folder = 'path/to/output/folder'
zip_file_utils.copy_files_by_extension(zip_file_path, extension, output_folder)
import shutil

def copy_files(file_paths):
    """
    Copies a list of files to a list of destinations.
    Args:
    file_paths (list of tuples): List of tuples, each containing a source file path and a destination path.
    Returns:
    None
    """
    for src_path, dest_path in file_paths:
        shutil.copy(src_path, dest_path)
import file_utils
file_paths = [
('path/to/source1.txt', 'path/to/dest1.txt'),
('path/to/source2.txt', 'path/to/dest2.txt'),
('path/to/source3.txt', 'path/to/dest3.txt'),
]
file_utils.copy_files(file_paths)
import shutil
import zipfile

def copy_files(file_paths, zip_path=None):
    """
    Copies a list of files to a list of destinations, and optionally zips them to a specified location.
    Args:
    file_paths (list of tuples): List of tuples, each containing a source file path and a destination path.
    zip_path (str): Optional path to a zip file to save the copied files to.
    Returns:
    None
    """
    for src_path, dest_path in file_paths:
        shutil.copy(src_path, dest_path)
    if zip_path is not None:
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
            for _, dest_path in file_paths:
                zip_file.write(dest_path)
import file_utils
file_paths = [
('path/to/source1.txt', 'path/to/dest1.txt'),
('path/to/source2.txt', 'path/to/dest2.txt'),
('path/to/source3.txt', 'path/to/dest3.txt'),
]
zip_path = 'path/to/zipfile.zip'
file_utils.copy_files(file_paths, zip_path)
`zip_file.write(dest_path)` stores each path as given, so the archive mirrors the destination folder structure:

path/to/dir1/dest1.txt
path/to/dir2/dest2.txt
path/to/dir3/dest3.txt

If a flat archive is wanted instead, with entries like:

dest1.txt
dest2.txt
dest3.txt

the name stored for each file can be overridden, as sketched below.
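A minimal sketch of that override; `zip_flat` is a hypothetical helper, and the `arcname` argument of `ZipFile.write` controls the name stored inside the archive:

```python
import os
import zipfile

def zip_flat(file_paths, zip_path):
    # Store every copied file at the top level of the archive
    with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        for _, dest_path in file_paths:
            zip_file.write(dest_path, arcname=os.path.basename(dest_path))
```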
import re

def split_log_file(log_file_path, output_dir):
    """
    Splits a log file into separate files for each error type, based on regex matches.
    Args:
    log_file_path (str): Path to the log file to split.
    output_dir (str): Path to the directory to save the output files in.
    Returns:
    None
    """
    # Define regex patterns for each error type
    info_pattern = re.compile(r'\bINFO\b')
    warn_pattern = re.compile(r'\bWARN(?:ING)?\b')
    error_pattern = re.compile(r'\bERROR\b')
    debug_pattern = re.compile(r'\bDEBUG\b')
    # Open the log file for reading
    with open(log_file_path, 'r') as log_file:
        # Loop through each line in the file
        for line in log_file:
            # Determine the error type of the line based on regex matches
            if info_pattern.search(line):
                error_type = 'info'
            elif warn_pattern.search(line):
                error_type = 'warn'
            elif error_pattern.search(line):
                error_type = 'error'
            elif debug_pattern.search(line):
                error_type = 'debug'
            else:
                error_type = None
            # If an error type was found, append the line to the matching output file
            if error_type is not None:
                output_file_path = f'{output_dir}/{error_type}.log'
                with open(output_file_path, 'a') as output_file:
                    output_file.write(line)
import log_utils
log_file_path = 'path/to/logfile.log'
output_dir = 'path/to/output/dir'
log_utils.split_log_file(log_file_path, output_dir)
import os
from datetime import datetime

def split_log_by_hour(log_file_path):
    # Create a directory to store the split log files
    output_dir = f"{os.path.splitext(log_file_path)[0]}_split_by_hour"
    os.makedirs(output_dir, exist_ok=True)
    # Open the log file and read the lines
    with open(log_file_path, "r") as log_file:
        log_lines = log_file.readlines()
    # Iterate over the log lines and split them by hour
    current_hour = None
    current_hour_lines = []
    for line in log_lines:
        # Parse the timestamp from the log line
        # (assuming lines start with "YYYY-MM-DD HH:MM:SS.ffffff", i.e. the first two words)
        timestamp_str = " ".join(line.split()[:2])
        timestamp = datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f")
        # Key on date and hour so the same hour on different days doesn't collide
        hour_key = timestamp.strftime("%Y-%m-%d_%H")
        # Check if the current line belongs to a different hour than the previous lines
        if hour_key != current_hour:
            # Write the previous hour's lines to a file (if there were any)
            if current_hour_lines:
                output_file_path = os.path.join(output_dir, f"{current_hour}.log")
                with open(output_file_path, "w") as output_file:
                    output_file.writelines(current_hour_lines)
            # Start collecting lines for the new hour
            current_hour = hour_key
            current_hour_lines = []
        # Add the line to the current hour's lines
        current_hour_lines.append(line)
    # Write the lines for the last hour to a file
    if current_hour_lines:
        output_file_path = os.path.join(output_dir, f"{current_hour}.log")
        with open(output_file_path, "w") as output_file:
            output_file.writelines(current_hour_lines)
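A usage sketch; `app.log` is a placeholder, and its lines must start with the timestamp format assumed above:

```python
# Produces app_split_by_hour/2023-02-04_12.log, app_split_by_hour/2023-02-04_13.log, ...
split_log_by_hour('app.log')
```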
import os
import re
import csv

def search_logs(log_files, regex_list, output_file_path):
    # Open the output CSV file for writing
    with open(output_file_path, 'w', newline='') as output_file:
        csv_writer = csv.writer(output_file)
        csv_writer.writerow(['Filename', 'Matched Line'])
        # Iterate over the log files and search for lines that match the regular expressions
        for log_file_path in log_files:
            with open(log_file_path, 'r') as log_file:
                for line in log_file:
                    for regex in regex_list:
                        if re.search(regex, line):
                            csv_writer.writerow([os.path.basename(log_file_path), line.strip()])
                            break  # Only record the first matching regex for each line
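A usage sketch; the file names and patterns are placeholders:

```python
search_logs(['app.log', 'db.log'], [r'ERROR', r'timeout'], 'matches.csv')
```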
import os
import re
import datetime

def process_files(input_files, output_directory, start_time, end_time):
    # Convert start_time and end_time strings to datetime objects
    start_time = datetime.datetime.strptime(start_time, '%Y-%m-%d %H:%M:%S')
    end_time = datetime.datetime.strptime(end_time, '%Y-%m-%d %H:%M:%S')
    # Create the output directory if it doesn't already exist
    os.makedirs(output_directory, exist_ok=True)
    # Iterate over each input file and extract matching lines to separate output files
    for input_file in input_files:
        with open(input_file, 'r') as f:
            # Extract the filename and extension from the input file path
            file_basename, file_extension = os.path.splitext(os.path.basename(input_file))
            # Create output file paths based on the input file name and output directory
            output_file_paths = {
                log_type: os.path.join(output_directory, f'{file_basename}-{log_type}{file_extension}')
                for log_type in ('info', 'warn', 'error', 'debug')
            }
            # Open the output files for writing
            output_files = {log_type: open(file_path, 'w') for log_type, file_path in output_file_paths.items()}
            # Iterate over each line in the input file and write matching lines to the appropriate output file
            for line in f:
                # Extract the timestamp from the line; skip lines without one
                timestamp_match = re.search(r'\[(.*?)\]', line)
                if not timestamp_match:
                    continue
                timestamp = datetime.datetime.strptime(timestamp_match.group(1), '%Y-%m-%d %H:%M:%S.%f')
                # Check if the timestamp falls within the specified range
                if start_time <= timestamp <= end_time:
                    # Extract the log type from the line; skip lines without a recognized type
                    log_type_match = re.search(r'\b(info|warn|error|debug)\b', line, re.IGNORECASE)
                    if not log_type_match:
                        continue
                    # Write the line to the appropriate output file
                    output_files[log_type_match.group(1).lower()].write(line)
            # Close the output files
            for output_file in output_files.values():
                output_file.close()
import process_logs
# specify the start and end time to filter logs
start_time = "2022-03-08 12:00:00"
end_time = "2022-03-08 13:00:00"
# process logs and output separate files for lines that match the time range
process_logs.process_files(["log1.txt", "log2.txt"], "output", start_time, end_time)
import requests

def get_sunset_time(lat, lng):
    """
    Returns the sunset time for a given latitude and longitude using the Sunrise-Sunset API.
    Parameters:
    lat (float): The latitude of the location.
    lng (float): The longitude of the location.
    Returns:
    str: The sunset time in ISO 8601 format, or an error message if the request failed.
    """
    # Make a request to the Sunrise-Sunset API to get the sunset time for the location
    response = requests.get(f"https://api.sunrise-sunset.org/json?lat={lat}&lng={lng}&formatted=0")
    # Check if the request was successful
    if response.status_code == 200:
        # Parse the JSON response to get the sunset time
        data = response.json()
        sunset_time = data["results"]["sunset"]
        return sunset_time
    else:
        return "Error: Could not retrieve sunset time"
import React, { useState } from 'react';

function BulletJournal() {
  // Define state for the bullet journal entries
  const [entries, setEntries] = useState([]);

  // Define function to add a new entry
  function addEntry(text) {
    const newEntry = { text };
    setEntries([...entries, newEntry]);
  }

  // Define function to remove an entry
  function removeEntry(index) {
    const newEntries = [...entries];
    newEntries.splice(index, 1);
    setEntries(newEntries);
  }

  // Define function to render a single entry
  function renderEntry(entry, index) {
    return (
      <li key={index}>
        {entry.text}
        <button onClick={() => removeEntry(index)}>Remove</button>
      </li>
    );
  }

  // Define function to render the list of entries
  function renderEntries() {
    return entries.map(renderEntry);
  }

  // Define function to handle form submission
  function handleSubmit(event) {
    event.preventDefault();
    const form = event.target;
    const text = form.text.value;
    addEntry(text);
    form.reset();
  }

  // Render the bullet journal component
  return (
    <div>
      <h1>Bullet Journal</h1>
      <form onSubmit={handleSubmit}>
        <input type="text" name="text" placeholder="Add new entry" required />
        <button type="submit">Add</button>
      </form>
      <ul>{renderEntries()}</ul>
    </div>
  );
}

export default BulletJournal;
Import the component into a Docusaurus page:
import React from 'react';
import Layout from '@theme/Layout';
import BulletJournal from '../components/BulletJournal';

function BulletJournalPage() {
  return (
    <Layout>
      <div className="container">
        <BulletJournal />
      </div>
    </Layout>
  );
}

export default BulletJournalPage;
docusaurus.config.js (files under src/pages are routed automatically by filename, so save the page above as src/pages/bullet-journal.js to get the /bullet-journal route; only the navbar link needs configuring):

module.exports = {
  // ...
  themeConfig: {
    // ...
    navbar: {
      // ...
      items: [
        // ...
        {
          to: '/bullet-journal',
          label: 'Bullet Journal',
          position: 'right',
        },
      ],
    },
  },
};
```sh
my-docusaurus-app/
├── docs/
│   └── ...
├── src/
│   ├── pages/
│   │   ├── index.js
│   │   └── bullet-journal.js
│   └── components/
│       └── BulletJournal.js
├── static/
│   └── ...
├── package.json
└── docusaurus.config.js
```
version: '3'
services:
  app:
    build: .
    ports:
      - "3000:3000"
    volumes:
      - ./website:/app/website
      - ./docs:/app/docs
    environment:
      - NODE_ENV=development
    command: npm start