Add files via upload
parent f1e69345d1
commit 0511d46773
@@ -0,0 +1,84 @@
# Import necessary libraries
import numpy as np
import tensorflow as tf
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from transformers import GPT2LMHeadModel, GPT2Tokenizer


# Example 1: Reactive Machine (Simple Rule-Based System)
def reactive_machine(input_value):
    if input_value > 0:
        return "Positive"
    else:
        return "Negative"


# Example 2: Limited Memory (Simple Machine Learning Model)
def limited_memory_model():
    # Generate a simple dataset
    X, y = make_classification(n_samples=1000, n_features=20, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # Train a simple linear regression model
    model = LinearRegression()
    model.fit(X_train, y_train)

    # Make predictions and threshold them into binary class labels
    predictions = model.predict(X_test)
    predictions = [1 if p > 0.5 else 0 for p in predictions]

    # Evaluate the model
    accuracy = accuracy_score(y_test, predictions)
    return accuracy


# Example 3: Theory of Mind (Simple Neural Network)
def theory_of_mind_model():
    # Create a simple neural network
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(128, activation='relu', input_shape=(20,)),
        tf.keras.layers.Dense(64, activation='relu'),
        tf.keras.layers.Dense(1, activation='sigmoid')
    ])

    # Compile the model
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

    # Generate a simple dataset
    X, y = make_classification(n_samples=1000, n_features=20, random_state=42)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    # Train the model
    model.fit(X_train, y_train, epochs=10, batch_size=32, validation_split=0.2)

    # Evaluate the model
    loss, accuracy = model.evaluate(X_test, y_test)
    return accuracy


# Example 4: General AI (Advanced Language Model)
def general_ai_model(prompt):
    # Load pre-trained GPT-2 model and tokenizer
    tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Encode the input prompt
    inputs = tokenizer.encode(prompt, return_tensors="pt")

    # Generate a response
    outputs = model.generate(inputs, max_length=100, num_return_sequences=1)

    # Decode the response
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response


# Example 5: Self-Aware AI (Theoretical Concept)
def self_aware_ai():
    return "Self-aware AI is a theoretical concept and not yet achievable with current technology."


# Main function to run examples
if __name__ == "__main__":
    print("Reactive Machine Output:", reactive_machine(5))
    print("Limited Memory Model Accuracy:", limited_memory_model())
    print("Theory of Mind Model Accuracy:", theory_of_mind_model())
    print("General AI Model Response:", general_ai_model("this is the future of AI?"))
    print("Self-Aware AI:", self_aware_ai())
@@ -0,0 +1,58 @@
name: CI/CD Pipeline

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: 3.8.0

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt

      - name: Lint with flake8
        run: |
          pip install flake8
          # stop the build if there are Python syntax errors or undefined names
          flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
          # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
          flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics

      - name: Build Docker image
        run: docker build -t my-flask-api .

      - name: Run tests
        run: |
          docker run -d -p 5000:5000 my-flask-api
          # Add your test commands here

      - name: Push to Docker Hub
        run: |
          echo "${{ secrets.DOCKER_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_USERNAME }}" --password-stdin
          docker tag my-flask-api:latest my-dockerhub-username/my-flask-api:latest
          docker push my-dockerhub-username/my-flask-api:latest
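Note: the workflow's "Install dependencies" step runs pip install -r requirements.txt, but no requirements.txt is included in this upload. A minimal sketch of what it would need to cover the script above (assumed, not part of this commit; torch is listed because the GPT-2 example requests PyTorch tensors via return_tensors="pt"):

numpy
scikit-learn
tensorflow
transformers
torch  # assumed: required by transformers for the "pt" tensors in general_ai_model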