
AI Part


Placed 46th out of 1,871 participants in a Dacon AI competition.


TensorFlow-Keras

Discrete data (logistic regression with Keras)

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam

# load the diabetes dataset

data = np.loadtxt('diabetes1.csv', delimiter=',', dtype=np.float32)
data.shape

x = data[:, :8]   # diabetes risk factors (features)
y = data[:, [8]]  # label: diabetic or not

# train with Keras

IO = Dense(units=1, input_shape=[8], activation='sigmoid')
model = Sequential([IO])
model.compile(loss='mean_squared_error', optimizer=Adam(0.1), metrics=['accuracy'])
history = model.fit(x, y, epochs=100)
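
The history object returned by model.fit records the per-epoch loss (and any compiled metrics); plotting it is a quick convergence check (a minimal sketch):

# per-epoch training loss recorded by model.fit
plt.plot(history.history['loss'])
plt.show()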

# feed in inputs and check whether diabetic or not (logistic regression for yes/no outcomes)

model.predict(x)

result = (model.predict(x) > 0.5).astype(np.int32)  # predict_classes was removed in recent Keras; threshold instead
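
For an overall accuracy figure rather than per-row outputs, model.evaluate returns the compiled loss and metrics (a minimal sketch):

loss, acc = model.evaluate(x, y)  # accuracy of the thresholded predictions
print(acc)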

Continuous data (multilinear regression) - gradient descent

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam

# target: Sepal Length
# features: Sepal Width, Petal Length, Petal Width
# predict Sepal Length when sw=3.1, pl=1.5, pw=0.2

from sklearn import datasets

iris = datasets.load_iris()

data = iris['data'] 

x_data = data[:,1:]
y_data = data[:,[0]]

# train with Keras

IO = Dense(units=1, input_shape=[3])
model = Sequential([IO])
model.compile(loss='mean_squared_error',
              optimizer=Adam(0.01))  # the string 'adam' also works, with the default learning rate
history = model.fit(x_data, y_data, epochs=1000)

# feed in new inputs and check the prediction

model.predict( np.array( [ [3.1,1.5,0.2] ] ) )

hy = model.predict( x_data )

# plot predictions against the actual values

plt.plot(y_data, 'b-')
plt.plot(hy, 'r-')
plt.show()

Stopping training early when the loss stops improving

# import the EarlyStopping callback

from tensorflow.keras.callbacks import EarlyStopping

earlyStop = EarlyStopping(monitor='loss', patience=50)

# pass the callback to model.fit as a list

history = model.fit(C, F, epochs=10000, callbacks=[earlyStop])
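
C and F above presumably come from an earlier Celsius-to-Fahrenheit example; a minimal self-contained sketch of the same pattern, with made-up stand-in data:

import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping

# hypothetical stand-ins for C and F: Celsius inputs, Fahrenheit targets
C = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=np.float32)
F = C * 1.8 + 32

model = Sequential([Dense(units=1, input_shape=[1])])
model.compile(loss='mean_squared_error', optimizer='adam')

# stop once the loss has not improved for 50 consecutive epochs
earlyStop = EarlyStopping(monitor='loss', patience=50)
history = model.fit(C, F, epochs=10000, callbacks=[earlyStop], verbose=0)
print(len(history.history['loss']))  # usually far fewer than 10000 epochs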

Normalization

from sklearn import preprocessing

scale = preprocessing.MinMaxScaler()
dataN = scale.fit_transform(data)
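
Note that MinMaxScaler learns the min/max of whatever it is fit on; the usual pattern is to fit on training data only and reuse the same scaler elsewhere (a sketch, with hypothetical data_train/data_test arrays):

# hypothetical split: fit the scaler on training data only
trainN = scale.fit_transform(data_train)
testN = scale.transform(data_test)          # reuse the training min/max
original = scale.inverse_transform(trainN)  # map scaled values back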

Multinomial Classification

import numpy as np
import tensorflow as tf

data = np.loadtxt('softmax.txt', dtype=np.float32)

x = data[:, 1:3]
y = data[:, 3:]

X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 3])
w = tf.Variable(tf.zeros([2, 3]))        # 2 inputs, 3 classes
b = tf.Variable(tf.random_uniform([3]))

z = tf.matmul(X, w) + b
hx = tf.nn.softmax(z)  # turn logits into class probabilities

cost_i = tf.nn.softmax_cross_entropy_with_logits_v2(logits=z, labels=Y)
cost = tf.reduce_mean(cost_i)

optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range(100):
    sess.run(train, {X: x, Y: y})
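
Once trained, class probabilities and predicted classes come from running hx in the same session (a sketch):

probs = sess.run(hx, {X: x})              # per-row class probabilities
pred = probs.argmax(axis=1)               # index of the most likely class
acc = (pred == y.argmax(axis=1)).mean()   # compare against the one-hot labels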

Multinomial Classification -> Keras version

from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
from tensorflow.keras.optimizers import Adam

IO = Dense(units=3, input_shape=[2], activation='softmax')
model = Sequential([IO])
model.compile(loss='categorical_crossentropy', optimizer=Adam(0.01), metrics=['accuracy'])
history = model.fit(x, y, epochs=1000)

p = model.predict(np.array([[3, 6]]))  # predicted grade after 3 hours of study and 6 hours of attendance

grades = ['A', 'B', 'C']

print(grades[np.argmax(p)])

Neural Network

Plain Keras version (single layer): accuracy stuck around 50%, since XOR is not linearly separable

import tensorflow as tf
import numpy as np

from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential

x_data = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=np.float32)
y_data = np.array([[0], [1], [1], [0]], dtype=np.float32)

model = Sequential()
model.add(Dense(units=1,input_shape=[2],activation='sigmoid'))
model.compile(loss='mean_squared_error', optimizer='adam',metrics=['accuracy'])
h=model.fit(x_data,y_data,epochs=100)

(model.predict(x_data) > 0.5).astype(np.int32)  # predict_classes was removed in recent Keras; threshold instead

--> Applying a neural network (adding hidden layers)

modelNN = Sequential()
modelNN.add(Dense(units=10, input_shape=[2], activation='sigmoid'))  # 2 inputs, 10 hidden units
modelNN.add(Dense(units=10, activation='sigmoid'))  # input shape is inferred after the first layer
modelNN.add(Dense(units=1, activation='sigmoid'))   # 10 inputs, 1 output
modelNN.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy'])

h=modelNN.fit(x_data,y_data,epochs=1000)
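
With the hidden layers, the network should now recover XOR; thresholding the predictions makes that easy to verify (a sketch):

# should print [[0], [1], [1], [0]] once training converges
print((modelNN.predict(x_data) > 0.5).astype(np.int32))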

Image training (MNIST)

from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data/',
                                  one_hot=True)

x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels

X = tf.placeholder( tf.float32,[None,784] )
Y = tf.placeholder( tf.float32,[None,10] )
# W1 = tf.Variable( tf.random_normal([784,100] ) )
W1 = tf.get_variable('w1', [784,100],
    initializer=tf.contrib.layers.xavier_initializer())
# b1 = tf.Variable( tf.random_normal([100] ) )
b1 = tf.get_variable('b1', [100],
     initializer=tf.contrib.layers.xavier_initializer())
W2 = tf.get_variable('w2', [100,10],
    initializer=tf.contrib.layers.xavier_initializer())
b2 = tf.get_variable('b2', [10],
     initializer=tf.contrib.layers.xavier_initializer())

Z1 = tf.matmul( X, W1) + b1
hy1 = tf.nn.relu( Z1 )
Z2 = tf.matmul( hy1, W2) + b2
hy = tf.nn.softmax( Z2 )

cross_ent = tf.nn.softmax_cross_entropy_with_logits_v2(
    logits=Z2, labels= Y)
loss = tf.reduce_mean( cross_ent )
optimizer = tf.train.GradientDescentOptimizer(0.1)
train = optimizer.minimize(loss )  

sess = tf.Session()
sess.run(tf.global_variables_initializer())

for i in range( 1000 ):
    sess.run( train, {X:x_train, Y:y_train })
    if i%100==0:
        print( i, sess.run(loss, 
                    {X:x_train, Y:y_train }))

import matplotlib.pyplot as plt
plt.imshow( x_test[0].reshape(28,28) )
plt.show()

y_test[0].argmax()

result = sess.run( hy, {X: [x_test[0]] })
result.argmax()
# result>0.5

hyx = sess.run( hy, {X:x_test})

aa = hyx.argmax( axis=1)

bb = y_test.argmax( axis=1 )

(aa==bb).mean()

from tensorflow.keras.layers import Dense
from tensorflow.keras import Sequential
import pandas as pd

model = Sequential()
model.add(Dense(units=64, input_dim=784,activation='relu'))
model.add(Dense(units=10, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
hist = model.fit(x_train,y_train,epochs=100)
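
Evaluating on the held-out test set gives a fairer accuracy number than the training history (a sketch, assuming the x_test/y_test arrays loaded above):

loss, acc = model.evaluate(x_test, y_test)
print(acc)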
 
