level 1
度假的乞丐
楼主
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 22 14:31:50 2019
@author: Administrator
"""
import tensorflow as tf
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
import sklearn.preprocessing as preprocessing
from sklearn import linear_model
from sklearn.model_selection import train_test_split
data_train = pd.read_csv(r"C:\Users\Administrator\Desktop\titanic\train.csv")
data_train['Sex'] = data_train['Sex'].astype('category')
data_train['Sex'].cat.categories = ['0', '1']
data_train['Embarked'] = data_train['Embarked'].astype('category')
data_train['Embarked'].cat.categories = ['0', '1', '2']
#age缺省值用平均年龄填充
a = data_train.mean()
b = a[4]
def missed_age(df):
df['Age'] = df['Age'].fillna(b).astype(float)
return df
data_train = missed_age(data_train)
def set_Cabin_type(df):
df.loc[ (df.Cabin.notnull()), 'Cabin' ] = "1"
df.loc[ (df.Cabin.isnull()), 'Cabin' ] = "0"
return df
data_train = set_Cabin_type(data_train)
data_train_input = data_train[['Pclass','Sex','Age','SibSp','Parch','Fare','Cabin','Embarked']]
data_train['Deceased'] = data_train['Survived'].apply(lambda s: 1 - s)
data_train_output = data_train[['Survived','Deceased']]
data_train_input = data_train_input.values
data_train_output = data_train_output.values
X_train,X_val,Y_train,Y_val = train_test_split(data_train_input,
data_train_output,
test_size = 0.2,
random_state = 42)
x = tf.placeholder(tf.float32, [None, 8], name = 'inputs')
y_ = tf.placeholder(tf.float32, [None, 2], name = 'labels')
weights_1 = tf.Variable(tf.random_normal([8, 8]), name='weights1')
biases_1 = tf.Variable(tf.zeros([8]), name='bias1')
a = tf.nn.tanh(tf.matmul(x , weights_1) + biases_1)
#keep_prob = tf.placeholder(tf.float32)
#a_drop = tf.nn.dropout(a, keep_prob)
weights2 = tf.Variable(tf.random_normal([8,2]),name = 'weights2')
bias2 = tf.Variable(tf.zeros([2]), name = 'bias2')
y_pred = tf.matmul(a, weights2) + bias2
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=(y_pred+1e-10),labels=y_))
cost = - tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_pred,1e-10,1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cost)
#saver = tf.train.Saver()
with tf.Session() as sess:
tf.global_variables_initializer().run()
for epoch in range(2):
total_loss = 0
for i in range(len(X_train)):
feed_dict = {x: X_train, y_:Y_train}
train_step.run(feed_dict = feed_dict)
loss = cost.eval(feed_dict = feed_dict)
train_step.run(feed_dict = feed_dict)
total_loss +=loss
if i%300 == 0:
# print('loss==%.12f' % loss)
print(loss)
# print('Epoch: %4d, total loss = %.12f' % (epoch,total_loss))
代码贴上
试了很多办法都loss一直为难nan
cross_entropy中y_pred 也尝试让他不为0了
但是还是输出为nan啊???
晕死
2019年03月25日 08点03分
1
"""
Created on Fri Mar 22 14:31:50 2019
@author: Administrator
"""
import tensorflow as tf
import pandas as pd
import numpy as np
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
import sklearn.preprocessing as preprocessing
from sklearn import linear_model
from sklearn.model_selection import train_test_split
data_train = pd.read_csv(r"C:\Users\Administrator\Desktop\titanic\train.csv")
data_train['Sex'] = data_train['Sex'].astype('category')
data_train['Sex'].cat.categories = ['0', '1']
data_train['Embarked'] = data_train['Embarked'].astype('category')
data_train['Embarked'].cat.categories = ['0', '1', '2']
#age缺省值用平均年龄填充
a = data_train.mean()
b = a[4]
def missed_age(df):
df['Age'] = df['Age'].fillna(b).astype(float)
return df
data_train = missed_age(data_train)
def set_Cabin_type(df):
df.loc[ (df.Cabin.notnull()), 'Cabin' ] = "1"
df.loc[ (df.Cabin.isnull()), 'Cabin' ] = "0"
return df
data_train = set_Cabin_type(data_train)
data_train_input = data_train[['Pclass','Sex','Age','SibSp','Parch','Fare','Cabin','Embarked']]
data_train['Deceased'] = data_train['Survived'].apply(lambda s: 1 - s)
data_train_output = data_train[['Survived','Deceased']]
data_train_input = data_train_input.values
data_train_output = data_train_output.values
X_train,X_val,Y_train,Y_val = train_test_split(data_train_input,
data_train_output,
test_size = 0.2,
random_state = 42)
x = tf.placeholder(tf.float32, [None, 8], name = 'inputs')
y_ = tf.placeholder(tf.float32, [None, 2], name = 'labels')
weights_1 = tf.Variable(tf.random_normal([8, 8]), name='weights1')
biases_1 = tf.Variable(tf.zeros([8]), name='bias1')
a = tf.nn.tanh(tf.matmul(x , weights_1) + biases_1)
#keep_prob = tf.placeholder(tf.float32)
#a_drop = tf.nn.dropout(a, keep_prob)
weights2 = tf.Variable(tf.random_normal([8,2]),name = 'weights2')
bias2 = tf.Variable(tf.zeros([2]), name = 'bias2')
y_pred = tf.matmul(a, weights2) + bias2
#cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=(y_pred+1e-10),labels=y_))
cost = - tf.reduce_sum(y_*tf.log(tf.clip_by_value(y_pred,1e-10,1.0)))
train_step = tf.train.AdamOptimizer(0.001).minimize(cost)
#saver = tf.train.Saver()
with tf.Session() as sess:
tf.global_variables_initializer().run()
for epoch in range(2):
total_loss = 0
for i in range(len(X_train)):
feed_dict = {x: X_train, y_:Y_train}
train_step.run(feed_dict = feed_dict)
loss = cost.eval(feed_dict = feed_dict)
train_step.run(feed_dict = feed_dict)
total_loss +=loss
if i%300 == 0:
# print('loss==%.12f' % loss)
print(loss)
# print('Epoch: %4d, total loss = %.12f' % (epoch,total_loss))
代码贴上
试了很多办法都loss一直为难nan
cross_entropy中y_pred 也尝试让他不为0了
但是还是输出为nan啊???
晕死