State-of-the-Art Deep Learning scripts organized by models - easy to train and deploy with reproducible accuracy and performance on enterprise-grade infrastructure.
I'M BACK IN FULL FORCE: ANTI VIRTUAL BLACKOUT PROJECT.. TAKE THAT, MICROSOFT #1408
```python
import pandas as pd
import numpy as np
import requests

def collect_data(api_url):
    # Fetch the system-status endpoint and load the JSON payload into a DataFrame.
    response = requests.get(api_url)
    data = response.json()
    df = pd.DataFrame(data)
    return df

# Example API URL for data collection
api_url = "https://example.com/api/system_status"
data = collect_data(api_url)
```
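collect_data assumes the endpoint returns JSON that pandas can turn into a table, typically a list of flat records. An illustrative payload (hypothetical field names, not part of the original script):

```python
import pandas as pd

# Illustrative shape of the expected JSON payload (hypothetical field names).
sample_payload = [
    {"host": "srv-01", "cpu": 42.0, "latency_ms": 101.3},
    {"host": "srv-02", "cpu": 97.5, "latency_ms": 880.0},
]
print(pd.DataFrame(sample_payload))  # same conversion collect_data performs
```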
```python
from sklearn.ensemble import IsolationForest

def detect_anomalies(data):
    # Flag roughly 1% of the rows as outliers with an Isolation Forest.
    model = IsolationForest(contamination=0.01)
    model.fit(data)
    data['anomaly'] = model.predict(data)  # -1 marks an anomaly, 1 a normal row
    anomalies = data[data['anomaly'] == -1]
    return anomalies

anomalies = detect_anomalies(data)
```
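A quick way to sanity-check the detector on synthetic data. Note that IsolationForest only accepts numeric columns, so any text fields returned by the API would have to be dropped first; the column names below are made up for the sketch:

```python
import numpy as np
import pandas as pd

# 200 normal readings plus two obvious outliers; the detector should typically flag the outliers.
rng = np.random.default_rng(42)
normal = pd.DataFrame({"cpu": rng.normal(50, 5, 200), "latency_ms": rng.normal(100, 10, 200)})
outliers = pd.DataFrame({"cpu": [99.0, 98.5], "latency_ms": [900.0, 850.0]})
sample = pd.concat([normal, outliers], ignore_index=True)

print(detect_anomalies(sample))
```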
```python
from twilio.rest import Client

def send_alert(anomalies):
    # Send an SMS through Twilio whenever at least one anomaly was found.
    if not anomalies.empty:
        account_sid = 'your_account_sid'
        auth_token = 'your_auth_token'
        client = Client(account_sid, auth_token)
        message = client.messages.create(
            body=f"Anomalies detected: {anomalies}",
            from_='+1234567890',
            to='+0987654321'
        )
        print(message.sid)

send_alert(anomalies)
```
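The Twilio credentials and phone numbers above are placeholders; in practice they would usually be read from the environment rather than hard-coded. A minimal sketch (the variable names are assumptions, not part of the original script):

```python
import os

# Read the Twilio credentials from environment variables (names assumed here).
account_sid = os.environ["TWILIO_ACCOUNT_SID"]
auth_token = os.environ["TWILIO_AUTH_TOKEN"]
```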
```python
from flask import Flask, render_template

app = Flask(__name__)

@app.route('/')
def index():
    # Render the collected data and the detected anomalies as HTML tables.
    data = collect_data(api_url)
    anomalies = detect_anomalies(data)
    return render_template(
        'index.html',
        tables=[data.to_html(classes='data', header="true"),
                anomalies.to_html(classes='data', header="true")]
    )

if __name__ == '__main__':
    app.run(debug=True)
```
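One way to exercise the dashboard route without starting a server is Flask's built-in test client. This is only a sketch and assumes api_url points at a reachable endpoint that returns JSON:

```python
# Smoke-test the '/' route without running a real server.
with app.test_client() as client:
    response = client.get('/')
    print(response.status_code, len(response.data))
```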
```python
from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()

@scheduler.scheduled_job('interval', minutes=5)
def scheduled_job():
    # Every 5 minutes: pull fresh data, detect anomalies, and alert if needed.
    data = collect_data(api_url)
    anomalies = detect_anomalies(data)
    send_alert(anomalies)

scheduler.start()
```
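Note that BlockingScheduler blocks the calling thread, so the scheduler and app.run() cannot share a process as written. One possible arrangement, sketched here with APScheduler's BackgroundScheduler, runs the periodic job in a background thread and the dashboard in the main thread:

```python
from apscheduler.schedulers.background import BackgroundScheduler

# Periodic checks in a background thread, Flask dashboard in the main thread.
scheduler = BackgroundScheduler()
scheduler.add_job(scheduled_job, 'interval', minutes=5)
scheduler.start()

if __name__ == '__main__':
    app.run(debug=True, use_reloader=False)  # the reloader would start the scheduler twice
```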
templates/index.html:

```html
<!DOCTYPE html>
<html>
  <head><title>System Status</title></head>
  <body>
    <!-- Assumes the route passes two pre-rendered tables: collected data first, anomalies second. -->
    <h1>Collected Data</h1>
    {{ tables[0] | safe }}
    <h1>Detected Anomalies</h1>
    {{ tables[1] | safe }}
  </body>
</html>
```