import numpy as np

# Define inputs for all truth tables
inputs = np.array([
    [0, 0],
    [0, 1],
    [1, 0],
    [1, 1]
])

# Define all possible truth tables as target outputs
truth_tables = [
    [0, 0, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0], [0, 0, 1, 1],
    [0, 1, 0, 0], [0, 1, 0, 1], [0, 1, 1, 0], [0, 1, 1, 1],
    [1, 0, 0, 0], [1, 0, 0, 1], [1, 0, 1, 0], [1, 0, 1, 1],
    [1, 1, 0, 0], [1, 1, 0, 1], [1, 1, 1, 0], [1, 1, 1, 1]
]

# Define the perceptron learning function
def perceptron_learning(inputs, targets, learning_rate=0.1, max_epochs=100):
    for attempt in range(10):  # Allow up to 10 restarts
        # Initialize weights and bias with random values (can be negative)
        weights = np.random.uniform(-1, 1, inputs.shape[1])
        bias = np.random.uniform(-1, 1)

        for epoch in range(max_epochs):
            error_count = 0
            for i, input_vector in enumerate(inputs):
                # Compute the perceptron output
                linear_combination = np.dot(input_vector, weights) + bias
                output = 1 if linear_combination >= 0 else 0

                # Calculate the error
                error = targets[i] - output

                # Update weights and bias if there is an error
                if error != 0:
                    weights += learning_rate * error * input_vector
                    bias += learning_rate * error
                    error_count += 1

            # If no errors, learning is complete
            if error_count == 0:
                return weights, bias, epoch + 1  # Successful learning

        # Reset weights and bias for a new attempt
        print(f"Failed to learn after {max_epochs} epochs. Restarting with new weights and bias...")

    # If unable to learn after all attempts, return failure
    print("Perceptron failed to learn this truth table.")
    return None, None, None

# Test perceptron for all truth tables
results = []
for idx, table in enumerate(truth_tables):
    print(f"\n=== Truth Table {idx + 1}: Targets = {table} ===")
    weights, bias, epochs = perceptron_learning(inputs, table)

    if weights is not None:
        # Test the final perceptron on inputs
        outputs = [1 if np.dot(input_vector, weights) + bias >= 0 else 0 for input_vector in inputs]

        # Display results
        print(f"Final Weights: {weights}")
        print(f"Final Bias: {bias}")
        print(f"Epochs to Learn: {epochs}")
        print(f"Final Test Output: {outputs}")

        # Store the results
        results.append({
            "truth_table": table,
            "weights": weights.tolist(),
            "bias": bias,
            "epochs": epochs,
            "outputs": outputs
        })
    else:
        print("Learning failed for this truth table.")
        results.append({
            "truth_table": table,
            "weights": None,
            "bias": None,
            "epochs": None,
            "outputs": None
        })
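Two of the sixteen tables can never be learned by this program, no matter how many restarts it is given: a single perceptron only represents linearly separable functions, and XOR ([0, 1, 1, 0], Truth Table 7) and XNOR ([1, 0, 0, 1], Truth Table 10) are not linearly separable. A minimal self-contained sketch of that claim (not part of the submitted program; the grid of candidate weights and biases is an illustrative assumption, chosen to be coarse but sufficient for every separable 2-input table under the same >= 0 threshold):

import numpy as np
from itertools import product

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
truth_tables = [list(bits) for bits in product([0, 1], repeat=4)]  # same ordering as above

# Coarse candidate grid; every linearly separable 2-input Boolean function
# has a solution with weights in {-1, 0, 1} and a half-integer bias.
candidate_w = [-1.0, 0.0, 1.0]
candidate_b = [-1.5, -1.0, -0.5, 0.0, 0.5, 1.0, 1.5]

def separable_in_grid(targets):
    # Return True if some (w1, w2, b) in the grid reproduces the target column.
    for w1, w2, b in product(candidate_w, candidate_w, candidate_b):
        outputs = [1 if w1 * x1 + w2 * x2 + b >= 0 else 0 for x1, x2 in inputs]
        if outputs == list(targets):
            return True
    return False

for idx, table in enumerate(truth_tables, start=1):
    if not separable_in_grid(table):
        print(f"Truth Table {idx}: {table} has no separating line in this grid")

Only Truth Tables 7 and 10 should be reported, which matches the two failures in the run output below.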
Run result: Success, 0.2s, 28864KB
stdout:
=== Truth Table 1: Targets = [0, 0, 0, 0] ===
Final Weights: [ 0.41887061 -0.34704004]
Final Bias: -0.4372477229428532
Epochs to Learn: 4
Final Test Output: [0, 0, 0, 0]

=== Truth Table 2: Targets = [0, 0, 0, 1] ===
Final Weights: [0.2766353  0.76365759]
Final Bias: -0.9620232619558886
Epochs to Learn: 6
Final Test Output: [0, 0, 0, 1]

=== Truth Table 3: Targets = [0, 0, 1, 0] ===
Final Weights: [ 0.07916369 -0.58534058]
Final Bias: -0.03036235397057227
Epochs to Learn: 6
Final Test Output: [0, 0, 1, 0]

=== Truth Table 4: Targets = [0, 0, 1, 1] ===
Final Weights: [ 0.39361101 -0.2748075 ]
Final Bias: -0.01443476882199543
Epochs to Learn: 10
Final Test Output: [0, 0, 1, 1]

=== Truth Table 5: Targets = [0, 1, 0, 0] ===
Final Weights: [-0.1006732   0.53012766]
Final Bias: -0.4530586827476426
Epochs to Learn: 7
Final Test Output: [0, 1, 0, 0]

=== Truth Table 6: Targets = [0, 1, 0, 1] ===
Final Weights: [0.05595831 0.17259464]
Final Bias: -0.09267415163055348
Epochs to Learn: 4
Final Test Output: [0, 1, 0, 1]

=== Truth Table 7: Targets = [0, 1, 1, 0] ===
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Perceptron failed to learn this truth table.
Learning failed for this truth table.

=== Truth Table 8: Targets = [0, 1, 1, 1] ===
Final Weights: [0.08362556 0.02328936]
Final Bias: -0.0005643355625956359
Epochs to Learn: 8
Final Test Output: [0, 1, 1, 1]

=== Truth Table 9: Targets = [1, 0, 0, 0] ===
Final Weights: [-0.0492409  -0.10376759]
Final Bias: 0.031147041978318496
Epochs to Learn: 18
Final Test Output: [1, 0, 0, 0]

=== Truth Table 10: Targets = [1, 0, 0, 1] ===
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Failed to learn after 100 epochs. Restarting with new weights and bias...
Perceptron failed to learn this truth table.
Learning failed for this truth table.

=== Truth Table 11: Targets = [1, 0, 1, 0] ===
Final Weights: [ 0.00483802 -0.82107872]
Final Bias: 0.05148154639064506
Epochs to Learn: 4
Final Test Output: [1, 0, 1, 0]

=== Truth Table 12: Targets = [1, 0, 1, 1] ===
Final Weights: [ 0.07869752 -0.99464246]
Final Bias: 0.9866698669056061
Epochs to Learn: 10
Final Test Output: [1, 0, 1, 1]

=== Truth Table 13: Targets = [1, 1, 0, 0] ===
Final Weights: [-0.20561694  0.04820611]
Final Bias: 0.045978870026403856
Epochs to Learn: 6
Final Test Output: [1, 1, 0, 0]

=== Truth Table 14: Targets = [1, 1, 0, 1] ===
Final Weights: [-0.32141478  0.82720322]
Final Bias: 0.04775344049392691
Epochs to Learn: 7
Final Test Output: [1, 1, 0, 1]

=== Truth Table 15: Targets = [1, 1, 1, 0] ===
Final Weights: [-0.20656687 -0.23461108]
Final Bias: 0.36266363324118867
Epochs to Learn: 12
Final Test Output: [1, 1, 1, 0]

=== Truth Table 16: Targets = [1, 1, 1, 1] ===
Final Weights: [-0.37899237 -0.18786259]
Final Bias: 0.6481963306246683
Epochs to Learn: 4
Final Test Output: [1, 1, 1, 1]
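
The two failures above are the expected ones: because XOR and XNOR are not linearly separable, the random restarts cannot help. As a point of contrast, a minimal sketch of a two-layer network that does compute XOR, using hand-picked weights rather than anything learned by the program above (the OR/NAND decomposition is an illustrative choice):

import numpy as np

def step(z):
    # Same >= 0 threshold as the perceptron above
    return (z >= 0).astype(int)

inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])

# Hidden layer: first unit computes OR, second unit computes NAND
W_hidden = np.array([[1.0, 1.0],
                     [-1.0, -1.0]])
b_hidden = np.array([-0.5, 1.5])

# Output unit: AND of the two hidden outputs
w_out = np.array([1.0, 1.0])
b_out = -1.5

hidden = step(inputs @ W_hidden.T + b_hidden)
xor_out = step(hidden @ w_out + b_out)
print(xor_out)  # expected: [0 1 1 0]

The hidden units compute OR and NAND, and AND-ing them gives XOR, which is why one extra layer is enough for the two tables a single perceptron cannot represent.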