Projects

Camera Analysis for Anomaly Detection

Description

This project analyzes images from security cameras to detect anomalies using image processing and AI algorithms.

Frameworks

Python, TensorFlow, OpenCV

Usage

Applicable to security systems, autonomous vehicles, and any other camera-equipped environment.
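
Since the original code for this project is not included here, the following is a minimal sketch of one common approach, assuming a convolutional autoencoder trained only on "normal" frames: frames whose reconstruction error exceeds a threshold are flagged as anomalous. The model file anomaly_autoencoder.keras, the 64x64 input size, and the threshold value are illustrative placeholders, not parts of the original project.

import cv2
import numpy as np
from tensorflow.keras.models import load_model

# Hypothetical autoencoder trained on "normal" frames only;
# the file name, input size, and threshold are placeholders.
autoencoder = load_model('anomaly_autoencoder.keras')
THRESHOLD = 0.02  # tune on held-out normal footage

cap = cv2.VideoCapture(0)  # any camera source: device index, file path, or RTSP URL
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Preprocess the frame to match the autoencoder's training input
    small = cv2.resize(frame, (64, 64)).astype(np.float32) / 255.0
    batch = np.expand_dims(small, axis=0)
    reconstruction = autoencoder.predict(batch, verbose=0)
    # A high reconstruction error means the frame is unlike the training data
    error = float(np.mean((batch - reconstruction) ** 2))
    if error > THRESHOLD:
        print(f"Anomaly detected (reconstruction error {error:.4f})")
cap.release()

In practice the threshold would be calibrated on held-out normal footage, for example as a high percentile of its reconstruction errors.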

LSTM Crypto Price Prediction

Description

This example demonstrates the general approach to training an LSTM model and includes a hypothetical real-time prediction loop.

Frameworks

Python, python-binance, scikit-learn, TensorFlow, Keras

Usage

Algorithmic Trading

from binance.client import Client
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
import joblib

# Initialize Binance Client
client = Client(None, None)

# Function to download and merge data
def download_and_merge_data(symbol, interval, start_date, end_date):
    # Download USDT Parity Data
    usdt_symbol = symbol + "USDT"
    usdt_data = client.get_historical_klines(usdt_symbol, interval, start_date, end_date)
    usdt_df = pd.DataFrame(usdt_data, columns=headers)

    # Download BTC Parity Data
    btc_symbol = symbol + "BTC"
    btc_data = client.get_historical_klines(btc_symbol, interval, start_date, end_date)
    btc_df = pd.DataFrame(btc_data, columns=headers)

    # Rename columns for BTC parity
    btc_df.rename(columns={
        'Open': 'OpenBTC', 
        'High': 'HighBTC', 
        'Low': 'LowBTC', 
        'Close': 'CloseBTC', 
        'Volume': 'VolumeBTC'
    }, inplace=True)

    # Merge on Open Time
    merged_data = usdt_df.merge(btc_df, on='Open Time', suffixes=('', '_BTC'))
    return merged_data.drop(columns=["QAV", "NAT", "TBBAV", "TBQAV", "Ignore"])

# Function to calculate RSI
def calculate_rsi(data, window=14):
    delta = data['Close'].diff()
    gain = (delta.where(delta > 0, 0)).rolling(window=window).mean()
    loss = (-delta.where(delta < 0, 0)).rolling(window=window).mean()

    rs = gain / loss
    rsi = 100 - (100 / (1 + rs))
    return rsi

# Function to create sequences
def create_sequences(features, labels, sequence_length):
    xs, ys = [], []
    for i in range(len(features) - sequence_length):
        xs.append(features[i:(i + sequence_length)])
        ys.append(labels[i])
    return np.array(xs), np.array(ys)

# Parameters
interval = Client.KLINE_INTERVAL_5MINUTE
start_date = "1 January 2023"
end_date = "8 December 2023"
headers = ["Open Time", "Open", "High", "Low", "Close", "Volume", "Close Time", "QAV", "NAT", "TBBAV", "TBQAV", "Ignore"]
sequence_length = 10
coins = ["ETH"]  # List of coins

# Initialize scalers
feature_scaler = StandardScaler()
label_scaler = StandardScaler()

# Process each coin
all_features, all_labels = [], []
for coin in coins:
    coin_df = download_and_merge_data(coin, interval, start_date, end_date)

    # Convert timestamps and drop unnecessary columns
    coin_df['Open Time'] = pd.to_datetime(coin_df['Open Time'], unit='ms')
    coin_df['Close Time'] = pd.to_datetime(coin_df['Close Time'], unit='ms')
    coin_df = coin_df.drop(columns=["QAV_BTC", "NAT_BTC", "TBBAV_BTC", "TBQAV_BTC", "Ignore_BTC","Close Time_BTC"])

    # Convert numerical columns to numeric type
    for col in [col for col in coin_df.columns if col not in ['Open Time', 'Close Time']]:
        coin_df[col] = pd.to_numeric(coin_df[col])

    # Replace any infinite values with NaN and drop rows with NaN values
    coin_df.replace([np.inf, -np.inf], np.nan, inplace=True)
    coin_df.dropna(inplace=True)

    # Calculate technical indicators
    coin_df['EMA55'] = coin_df['Close'].ewm(span=55, adjust=False).mean()
    coin_df['EMA5'] = coin_df['Close'].ewm(span=5, adjust=False).mean()
    coin_df['EMA10'] = coin_df['Close'].ewm(span=10, adjust=False).mean()
    coin_df['SMA9'] = coin_df['Close'].rolling(window=9).mean()
    coin_df['Above_SMA9'] = (coin_df['Close'] > coin_df['SMA9']).astype(int)
    coin_df['Above_EMA55'] = (coin_df['Close'] > coin_df['EMA55']).astype(int)
    window_size = 10
    coin_df['Volume_Price_Correlation'] = coin_df['Volume'].rolling(window=window_size).corr(coin_df['Close'])
    coin_df['RSI'] = calculate_rsi(coin_df)
    coin_df.replace([np.inf, -np.inf], np.nan, inplace=True)
    coin_df.dropna(inplace=True)

    # Prepare labels for sequence creation
    labels_high = coin_df['High'].shift(-sequence_length)
    labels_low = coin_df['Low'].shift(-sequence_length)

    # Combine labels
    labels = np.column_stack([labels_high, labels_low])

    # Dynamic Feature Selection and Normalization
    features = coin_df.drop(columns=['Open Time', 'Close Time'])
    scaled_features = feature_scaler.fit_transform(features)

    # Combine features and labels for each coin
    all_features.append(scaled_features)
    all_labels.append(labels)

# Combine all features and labels from different coins
all_features = np.concatenate(all_features, axis=0)
all_labels = np.concatenate(all_labels, axis=0)

# Handle NaN in labels
mask = ~np.isnan(all_labels).any(axis=1) 
all_features = all_features[mask]
all_labels = all_labels[mask]

# Scale labels (fit on the combined, NaN-filtered label array)
y_scaled = label_scaler.fit_transform(all_labels)

# Split into training and testing sets
train_size = int(len(all_features) * 0.8)
X_train_unscaled, X_test_unscaled = all_features[:train_size], all_features[train_size:]
y_train, y_test = y_scaled[:train_size], y_scaled[train_size:]

# Create sequences for training and testing sets
X_train, y_train_seq = create_sequences(X_train_unscaled, y_train, sequence_length)
X_test, y_test_seq = create_sequences(X_test_unscaled, y_test, sequence_length)

import datetime
from tensorflow.keras.callbacks import TensorBoard

# Align the label sequences with the feature sequences (defensive; lengths should already match)
y_train_seq = y_train_seq[:len(X_train)]
y_test_seq = y_test_seq[:len(X_test)]

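# Generate a random permutation of training-sample indices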
indices = np.arange(X_train.shape[0])
np.random.shuffle(indices)

# Shuffle the training data using the generated indices
X_train_shuffled = X_train[indices]
y_train_shuffled = y_train_seq[indices]
log_dir = "logs/fit1/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from tensorflow.keras.regularizers import l1_l2
import tensorflow.keras.backend as K

def mase(y_true, y_pred):
    # Calculate the MAE for the predictions
    mae = K.mean(K.abs(y_pred - y_true))

    # Calculate the MAE of the naive forecast (naive forecast = last observed value)
    mae_naive = K.mean(K.abs(y_true[1:] - y_true[:-1]))

    # Prevent division by zero
    mae_naive = K.maximum(mae_naive, K.epsilon())

    # Calculate MASE
    return mae / mae_naive

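# Regularization hyperparameters (the Dropout layer is currently commented out below)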
dropout_rate = 0.2
l1_reg = 1e-5
l2_reg = 1e-4

# Build the model
model = Sequential()

# First LSTM layer with L1/L2 regularization
model.add(LSTM(units=256, return_sequences=True,
               activation='swish', recurrent_activation='sigmoid',
               kernel_regularizer=l1_l2(l1=l1_reg, l2=l2_reg)))
#model.add(Dropout(dropout_rate))
# Second LSTM layer
model.add(LSTM(units=128, return_sequences=False, 
               activation='swish', recurrent_activation='sigmoid'))

# Dense layer for output
model.add(Dense(2))

model.compile(optimizer='adam', loss='mse', metrics=[mase])
joblib.dump(feature_scaler, 'feature_scaler.pkl')
joblib.dump(label_scaler, 'label_scaler.pkl')
from tensorflow.keras.callbacks import ModelCheckpoint

checkpoint_callback = ModelCheckpoint(
    'model5.keras',        # Path where the best model is saved
    monitor='val_mase',    # Monitor the validation MASE
    verbose=1,             # Verbosity mode
    save_best_only=True,   # Save only when the monitored metric improves
    mode='min'             # 'min' because a lower MASE is better
)

history = model.fit(
    X_train_shuffled, y_train_shuffled, 
    epochs=70, 
    validation_data=(X_test, y_test_seq),
    callbacks=[tensorboard_callback, checkpoint_callback]  # Include checkpoint_callback here
)
from sklearn.metrics import mean_squared_error
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import custom_object_scope

with custom_object_scope({'mase': mase}):
    model = load_model('model5.keras')
feature_scaler = joblib.load('feature_scaler.pkl')
label_scaler = joblib.load('label_scaler.pkl')
pred = model.predict(X_test)
print("Shape of pred:", pred.shape)
# Sanity-check the prediction shape: two output columns (High and Low) are expected
if pred.ndim == 2 and pred.shape[1] == 1:
    raise ValueError("Model is not outputting predictions for both 'High' and 'Low' prices.")


# If your target variable was scaled, inverse transform the predictions
inverse_pred = label_scaler.inverse_transform(pred)

predicted_high = inverse_pred[:, 0]
predicted_low = inverse_pred[:, 1]

# Reshape y_test_seq to 2D for inverse scaling and metrics
y_test_seq_reshaped = y_test_seq.reshape(-1, 2)

# Inverse transform the actual target values in the test set
inverse_test_y = label_scaler.inverse_transform(y_test_seq_reshaped)

# Calculate Normalized RMSE (on scaled data)
norm_rmse = np.sqrt(mean_squared_error(y_test_seq, pred))
print('Normalized RMSE:', norm_rmse)

# Calculate Absolute RMSE (on original scale data)
abs_rmse = np.sqrt(mean_squared_error(inverse_test_y, inverse_pred))
print('Absolute RMSE:', abs_rmse)

# Define Mean Absolute Percentage Error function
def mean_absolute_percentage_error(y_true, y_pred): 
    y_true, y_pred = np.array(y_true), np.array(y_pred)
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100

# Calculate Normalized MAPE (note: MAPE is unstable when scaled values are near zero)
norm_mape = mean_absolute_percentage_error(y_test_seq, pred)
print('Normalized MAPE:', norm_mape, '%')

# Calculate Absolute MAPE
abs_mape = mean_absolute_percentage_error(inverse_test_y, inverse_pred)
print('Absolute MAPE:', abs_mape, '%')
import matplotlib.pyplot as plt

# Assuming inverse_test_y[:, 0] is the true high and inverse_test_y[:, 1] is the true low
true_high = inverse_test_y[:, 0]
true_low = inverse_test_y[:, 1]

# Plotting the results
plt.figure(figsize=(12, 6))
plt.plot(true_high, label='Actual High', color='blue')
plt.plot(predicted_high, label='Predicted High', color='red', alpha=0.7)
plt.plot(true_low, label='Actual Low', color='green')
plt.plot(predicted_low, label='Predicted Low', color='orange', alpha=0.7)
plt.title('Comparison of Actual and Predicted High/Low Prices')
plt.xlabel('Time Steps')
plt.ylabel('Price')
plt.legend()
plt.show()
import datetime as dt
import time

import joblib
import numpy as np
import pandas as pd
import pytz
import tensorflow.keras.backend as K
from binance.client import Client
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import custom_object_scope

client = Client(None, None)

headers = ["Open Time", "Open", "High", "Low", "Close", "Volume", "Close Time", "QAV", "NAT", "TBBAV", "TBQAV", "Ignore"]

def mase(y_true, y_pred):
    # Calculate the MAE for the predictions
    mae = K.mean(K.abs(y_pred - y_true))

    # Calculate the MAE of the naive forecast (naive forecast = last observed value)
    mae_naive = K.mean(K.abs(y_true[1:] - y_true[:-1]))

    # Prevent division by zero
    mae_naive = K.maximum(mae_naive, K.epsilon())

    # Calculate MASE
    return mae / mae_naive

def calculate_rsi(data, window=14):
    delta = data['Close'].diff()
    gain = (delta.where(delta > 0, 0)).rolling(window=window).mean()
    loss = (-delta.where(delta < 0, 0)).rolling(window=window).mean()

    rs = gain / loss
    rsi = 100 - (100 / (1 + rs))
    return rsi

def download_and_merge_data(symbol, interval, start_date, end_date):
    # Download USDT Parity Data for the requested window
    usdt_symbol = symbol + "USDT"
    usdt_data = client.get_historical_klines(usdt_symbol, interval, start_date, end_date)
    usdt_df = pd.DataFrame(usdt_data, columns=headers)

    # Download BTC Parity Data for the same window
    btc_symbol = symbol + "BTC"
    btc_data = client.get_historical_klines(btc_symbol, interval, start_date, end_date)
    btc_df = pd.DataFrame(btc_data, columns=headers)

    # Rename columns for BTC parity
    btc_df.rename(columns={
        'Open': 'OpenBTC', 
        'High': 'HighBTC', 
        'Low': 'LowBTC', 
        'Close': 'CloseBTC', 
        'Volume': 'VolumeBTC'
    }, inplace=True)

    # Merge on Open Time
    merged_data = usdt_df.merge(btc_df, on='Open Time', suffixes=('', '_BTC'))
    return merged_data.drop(columns=["QAV", "NAT", "TBBAV", "TBQAV", "Ignore"])

def preprocess_realtime_data(data):
    # Assuming the data includes both USDT and BTC parity data merged

    # Convert millisecond timestamps
    data['Open Time'] = pd.to_datetime(data['Open Time'], unit='ms', errors='coerce')
    data['Close Time'] = pd.to_datetime(data['Close Time'], unit='ms', errors='coerce')

    # Drop unnecessary columns
    data = data.drop(columns=["QAV_BTC", "NAT_BTC", "TBBAV_BTC", "TBQAV_BTC", "Ignore_BTC", "Close Time_BTC"])

    # Convert numerical columns to numeric type
    for col in [col for col in data.columns if col not in ['Open Time', 'Close Time']]:
        data[col] = pd.to_numeric(data[col])

    # Replace any infinite values with NaN and drop rows with NaN values
    data.replace([np.inf, -np.inf], np.nan, inplace=True)
    data.dropna(inplace=True)

    # Calculate technical indicators as done during training
    data['EMA55'] = data['Close'].ewm(span=55, adjust=False).mean()
    data['EMA5'] = data['Close'].ewm(span=5, adjust=False).mean()
    data['EMA10'] = data['Close'].ewm(span=10, adjust=False).mean()
    data['SMA9'] = data['Close'].rolling(window=9).mean()
    data['Above_SMA9'] = (data['Close'] > data['SMA9']).astype(int)
    data['Above_EMA55'] = (data['Close'] > data['EMA55']).astype(int)
    window_size = 10
    data['Volume_Price_Correlation'] = data['Volume'].rolling(window=window_size).corr(data['Close'])
    data['RSI'] = calculate_rsi(data)

    # Select and return the relevant features used in your model
    return data[['Open','High','Low','Close','Volume','OpenBTC','HighBTC','LowBTC','CloseBTC','VolumeBTC','EMA55','EMA5','EMA10','SMA9','Above_SMA9','Above_EMA55','Volume_Price_Correlation','RSI']]


def fetch_latest_candles(symbol, interval, start_date='1 day ago UTC', end_date='now UTC'):
    return download_and_merge_data(symbol, interval, start_date, end_date)

# Initialize data_queue with the latest 14 candlesticks
coin = "ETH"
interval = Client.KLINE_INTERVAL_5MINUTE
start_date = "1 day ago UTC"
end_date = "now UTC"

# Fetch initial data to start the data_queue
initial_data = fetch_latest_candles(coin, interval, start_date, end_date)
data_queue = initial_data.tail(30)

with custom_object_scope({'mase': mase}):
    model = load_model('model5.keras')
feature_scaler = joblib.load('feature_scaler.pkl')
label_scaler = joblib.load('label_scaler.pkl')

# Timezone setup ('Turkey' is a deprecated pytz alias)
tz = pytz.timezone('Europe/Istanbul')

# Initialize variables for tracking BUY/SELL and P/L
predictions = [0.0, 0.0]  # rolling window of the last two predicted highs
temp = []
p_and_l = 0
_df = []
p = None
count = 0

while True:
    current_time = dt.datetime.now(tz=tz)
    if current_time.minute % 5 == 0 and current_time.second == 0:
        try:
            # Fetch the latest candlestick data
            latest_candles = fetch_latest_candles(coin, interval, start_date, end_date)
            new_candle = latest_candles.iloc[-1]  # Get the most recent candle

            data_queue = pd.concat([data_queue, pd.DataFrame([new_candle])]).tail(30)

            # Process and predict
            processed_data = preprocess_realtime_data(data_queue)
            scaled_data = feature_scaler.transform(processed_data)
            data_sequence = scaled_data[-10:]  # Assuming your model expects sequences of length 10
            data_sequence = np.reshape(data_sequence, (1, 10, -1))
            pred = model.predict(data_sequence)
            inv_pred = np.rint(label_scaler.inverse_transform(pred))
            print("Prediction -> ", inv_pred, '\n')

            # Keep a rolling window of the last two predicted highs (scalars,
            # so the comparison below is unambiguous)
            predictions.pop(0)
            predictions.append(float(inv_pred[0, 0]))

            _p = p

            # Update the current price based on the new candle
            price = float(new_candle['Close'])

            print("Prediction delta: ", predictions[1] - predictions[0])
            if predictions[1] - predictions[0] > 0:
                p = 1
                _ = 'BUY'
            else:
                p = 0
                _ = 'SELL'

            temp.append(price)
            print("temp -> ", temp)

            if _p is not None and _p != p:
                # The signal flipped: realize P/L over the prices collected since the last flip
                if p == 1:
                    # Was selling, now buying back: profit = entry - exit
                    p_and_l += float(temp[0]) - float(temp[-1])
                elif p == 0:
                    # Was buying, now selling: profit = exit - entry
                    p_and_l += float(temp[-1]) - float(temp[0])

                temp = []
                p = None

            _df.append([_, temp, p_and_l, dt.datetime.now(tz=tz)])

            count += 1
        except ValueError as e:
            print("Error:", e)

        time.sleep(60)  # Wait to avoid multiple triggers within the same minute
    time.sleep(1)

HFT Trade Bot

Description

This example demonstrates the general structure of market orders and order parameters. The bot uses moving-average crossovers to decide when to buy and sell. It runs on a 1-minute interval by default, which is changeable.

Frameworks

Python, CCXT, pandas, NumPy

Usage

Algorithmic Trading

import ccxt
import pandas as pd
import time
import pytz
from decimal import Decimal
from datetime import datetime
import numpy as np

api_key = 'xxxxxxx'
secret_key = 'xxxxxxx'

exchange = ccxt.binance({
    'apiKey': api_key,
    'secret': secret_key,
    'enableRateLimit': True,
    'options': {
        'defaultType': 'future'
    }
})

symbol = 'BTC/USDT'  # symbol to trade
timeframe='1m'
leverage = 10  # leverage
previous_state = None  # keep track of the previous state

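# Moving-average settings: fast/slow/safety windows, a higher-timeframe MA, and the MA type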
mafast = 1
maslow = 2
masafety = 55
ma_htf = 9
matype='EMA'
higher_timeframe1 = '1s'
params1 = {
    'price': 'mark'
}

def convert_to_utc_plus_3(timestamp):
    utc = pytz.UTC
    utc_plus_3 = pytz.timezone('Etc/GMT-3')
    localized_timestamp = utc.localize(timestamp)
    return localized_timestamp.astimezone(utc_plus_3)

def print_signal(signal):
    local_timestamp = convert_to_utc_plus_3(signal['timestamp'])
    time_str = local_timestamp.strftime('%H:%M:%S')
    print(f"Timestamp: {time_str}, Signal: {signal['signal']}")
    
def get_historical_data(timeframe):
    ohlcv = exchange.fetch_ohlcv(symbol, timeframe)
    data = []
    for o in ohlcv:
        data.append([o[0], o[1], o[2], o[3], o[4], o[5]])
    df = pd.DataFrame(data, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
    df['timestamp'] = pd.to_datetime(df['timestamp'], unit='ms')
    return df

def calculate_moving_average(matype, data, window):
    if matype == 'SMA':
        return data.rolling(window).mean()
    elif matype == 'EMA':
        return data.ewm(span=window).mean()
    elif matype == 'WMA':
        weights = np.arange(1, window+1)
        return data.rolling(window).apply(lambda x: np.dot(x, weights) / weights.sum(), raw=True)
    else:
        return None

def detect_signals(df):
    df['crs_buy'] = df['avgma_fast'].gt(df['avgma_safety'].shift(1)) & df['avgma_fast'].shift(1).le(df['avgma_safety'].shift(1))
    df['crs_sell'] = df['avgma_fast'].lt(df['avgma_safety'].shift(1)) & df['avgma_fast'].shift(1).ge(df['avgma_safety'].shift(1))
    return df

def resample_to_higher_timeframe(df, higher_timeframe1):
    df = df.set_index('timestamp')
    df = df.resample(higher_timeframe1).agg({'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last', 'volume': 'sum'})
    df = df.dropna()
    df = df.reset_index()
    return df

def calculate_htf_moving_average(df, ma_htf):
    df['avgma_htf'] = df['close'].ewm(span=ma_htf).mean()
    return df

def calculate_safety_zones(df):
    df['safety_zone'] = df['avgma_fast'].ge(df['avgma_slow']) & df['avgma_slow'].ge(df['avgma_safety'])
    df['unsafety_zone'] = df['avgma_fast'].le(df['avgma_slow']) & df['avgma_slow'].le(df['avgma_safety'])
    return df

def set_leverage(leverage):
    # ccxt's unified method for setting futures leverage
    return exchange.set_leverage(leverage, symbol)

def create_order(symbol, side, amount, price=None):
    params = {'quoteOrderQty': amount}
    try:
        print(f"Creating {side} order with amount {amount}")
        exchange.create_order(symbol, 'market', side, str(amount), price, params)
        trades = exchange.fetch_my_trades(symbol)
        for trade in trades[-1:]:  # print the most recent trade
            print(trade)
    except ccxt.InsufficientFunds as e:
        print('Insufficient funds', e)
    except Exception as e:
        print('An error occurred', e)

def get_balance():
    balance = exchange.fetch_balance()
    free_usdt = balance['free']['USDT']
    return free_usdt

def get_position():
    try:
        # replace 'BTC/USDT' with your actual trading symbol
        positions = exchange.fetch_positions(['BTC/USDT:USDT'])
        for position in positions:
            if position['symbol'] == 'BTC/USDT:USDT':
                return position
        return None
    except Exception as e:
        print("Error fetching position: ", e)
        return None

def get_fee_rate():
    fee = exchange.fetch_trading_fees()
    return str(fee['BTC/USDT:USDT']['taker'])

def get_ticker_price():
    ticker = exchange.fetch_ticker(symbol)
    price = ticker['last']
    return price

def adjust_for_fee_rate_and_price(amount):
    print(f"Adjusting amount {amount} for fee and price")
    amount = Decimal(amount)
    fee_rate = Decimal(get_fee_rate())
    ticker_price = Decimal(get_ticker_price())
    one = Decimal(1)
    adjusted_amount = (amount / ticker_price) * (one - fee_rate)
    return str(adjusted_amount.quantize(Decimal("1.00000")))

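# State-machine trading logic: trade on transitions between Safety/Neutral/Unsafety zones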
def trading_logic(current_state):
    global previous_state
    global latest_signal
    global latest_htf_signal

    if current_state == 'Safety Zone' and previous_state in ['Neutral Zone', 'Unsafety Zone'] :
        balance = get_balance()
        balance = float(adjust_for_fee_rate_and_price(balance))*0.99*leverage
        create_order('BTC/USDT:USDT','buy', balance)

    elif current_state == 'Neutral Zone' and previous_state == 'Safety Zone':
        position = get_position()
        if position:
            sell_qty = float(position['info']['positionAmt']) * 0.75
            create_order('BTC/USDT:USDT','sell', sell_qty)

    elif current_state == 'Unsafety Zone' and previous_state in ['Safety Zone', 'Neutral Zone']:
        position = get_position()
        if position:
            sell_qty = position['info']['positionAmt']
            create_order('BTC/USDT:USDT','sell', sell_qty)

    elif current_state == 'Neutral Zone' and previous_state == 'Unsafety Zone':
        balance = get_balance()
        buy_qty = balance * 0.25 * leverage
        buy_qty = adjust_for_fee_rate_and_price(buy_qty)
        create_order('BTC/USDT:USDT', 'buy', buy_qty)

    previous_state = current_state


def main():
    global latest_signal, latest_htf_signal
    latest_signal = None
    latest_htf_signal = None
    set_leverage(leverage)
    retries = 0
    max_retries = 5
    while True:
        try:
            current_time = exchange.fetch_time()
            current_time = datetime.utcfromtimestamp(current_time / 1000)  # convert from milliseconds to datetime
            if current_time.second < 5:  # start of a new minute, with a 5-second buffer
                df = get_historical_data(timeframe)
                df['avgma_fast'] = calculate_moving_average(matype, df['close'], mafast)
                df['avgma_slow'] = calculate_moving_average(matype, df['close'], maslow)
                df['avgma_safety'] = calculate_moving_average(matype, df['close'], masafety)
                df = detect_signals(df)
                df_htf = resample_to_higher_timeframe(df, higher_timeframe1)
                df_htf['avgma_htf'] = calculate_moving_average(matype, df_htf['close'], ma_htf)
                df = calculate_safety_zones(df)
                latest_signal = df.iloc[-1]
                latest_htf_signal = df_htf.iloc[-1]
                safety_zone = latest_signal['safety_zone']
                unsafety_zone = latest_signal['unsafety_zone']

                if safety_zone:
                    current_state = "Safety Zone"
                elif unsafety_zone:
                    current_state = "Unsafety Zone"
                else:
                    current_state = "Neutral Zone"
                    
                trading_logic(current_state)
                # Flag whether the latest close is above the higher-timeframe MA
                if latest_signal['close'] > latest_htf_signal['avgma_htf']:
                    degis = 'above'
                else:
                    degis = 'below'
                print(current_state, current_time, latest_htf_signal['avgma_htf'], latest_signal['close'], degis)
                time.sleep(5)  # sleep for 5 seconds to avoid performing the analysis multiple times in the buffer period
            else:
                time.sleep(1)  # sleep for 1 second
            retries = 0  # reset retries count after successful execution
        except ccxt.RequestTimeout:
            if retries < max_retries:
                retries += 1
                print(f"Request timed out, retrying ({retries}/{max_retries})...")
                time.sleep(1)  # wait for 1 second before retrying
            else:
                print("Maximum retries reached, stopping execution.")
                break  # exit the loop if maximum retries reached


if __name__ == '__main__':
    main()