I'm training VGG16 in Julia on an NVIDIA TITAN with 10 GB of VRAM. Is that enough to train VGG16 end to end? The Julia Knet examples include ResNet and VGG tests; were those parameters trained by the Knet authors themselves? Can VGG be trained on a card like mine? I set the batch size to 100 for training.
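A back-of-envelope estimate helps with the 10 GB question. The sketch below is my own, assuming the standard VGG16 output shapes for 224x224 inputs; it tallies the forward activations for one batch:

# Rough VGG16 activation memory (standard 224x224 VGG16 output shapes)
shapes = [(224,224,64), (224,224,64), (112,112,64),            # block 1 + pool
          (112,112,128), (112,112,128), (56,56,128),           # block 2 + pool
          (56,56,256), (56,56,256), (56,56,256), (28,28,256),  # block 3 + pool
          (28,28,512), (28,28,512), (28,28,512), (14,14,512),  # block 4 + pool
          (14,14,512), (14,14,512), (14,14,512), (7,7,512)]    # block 5 + pool
batch = 100
bytes = 4   # Float32; Float64 doubles every number below
acts = sum(prod.(shapes)) * batch * bytes
println("forward activations: ", round(acts / 2^30; digits=1), " GiB")   # ~5.6 GiB

Forward activations alone come to about 5.6 GiB at batch 100, backpropagation roughly doubles that, and VGG16's ~138M parameters add ~0.5 GiB each for weights and gradients, so batch 100 is already over 10 GB even at Float32, and KnetArray{Float64} doubles everything again. A Float32 batch of 16-32 should fit. As for the shipped examples: as far as I can tell, Knet's vgg/resnet demos load pretrained MatConvNet weight files (the same kind of file this code reads) rather than weights trained by the Knet authors.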
using Knet, MAT   # conv4/pool/relu/mat/KnetArray come from Knet; matopen/matread from MAT

# `file` is the Dict read from a MatConvNet .mat model file.
# Float64 doubles GPU memory use, so the weights default to Float32.
function getWeights(file; atype=KnetArray{Float32})
    layers = get(file, "layers", "")
    meta = get(file, "meta", "")
    weights = Array{Any}(undef, 32)   # 13 conv + 3 fc layers, one weight and one bias each
    i = 1
    wnames = Any[]
    for la in layers
        # MatConvNet layer names look like "conv1_1", "relu1_1", "pool1", "fc6";
        # split the prefix ("conv"/"fc") from the suffix ("1_1"/"6")
        wholename = get(la, "name", "")
        if length(wholename) > 5
            name = SubString(wholename, 1, 4)
            lastsyn = SubString(wholename, 5)
        else
            name = SubString(wholename, 1, 2)
            lastsyn = SubString(wholename, 3)
        end
if name=="conv"
weights[i]=la["weights"][1]
#println(weights[i])
b=la["weights"][2]
weights[i+1]=reshape(b,(1,1,length(b),1))
#println(weights[i+1])
push!(wnames,string("w",lastsyn))
push!(wnames,string("b",lastsyn))
println(string("w",lastsyn))
println(string("b",lastsyn))
elseif name=="fc"
weights[i]=la["weights"][1]
#println(weights[i])
weights[i+1]=la["weights"][2]
#println(weights[i+1])
push!(wnames,string("w",lastsyn))
push!(wnames,string("b",lastsyn))
println(string("w",lastsyn))
println(string("b",lastsyn))
end
if name=="conv"||name=="fc"
i=i+2;
end
end
    # overwrite slots 29-30 (fc7 in the original VGG) with a custom layer from softpro.mat
    wv = matopen("softpro.mat")
    weights[29] = read(wv, "w")
    weights[30] = read(wv, "b")
    close(wv)
    #weights[28] = reshape(w[28], 1, 1, 4096, 1)
    return map(a -> convert(atype, a), weights)   # honor the atype keyword
end
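A minimal usage sketch, assuming the weights come from the MatConvNet release file imagenet-vgg-verydeep-16.mat (the file Knet's own vgg example downloads); matread returns the Dict that getWeights expects:

vgg = matread("imagenet-vgg-verydeep-16.mat")   # Dict with "layers" and "meta"
w = getWeights(vgg)                             # predict below uses w[1]..w[30]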
function predict(w, x)
    # block 1
    x1 = relu.(conv4(w[1], x; padding=1) .+ w[2])
    x2 = relu.(conv4(w[3], x1; padding=1) .+ w[4])
    x3 = pool(x2; window=2)
    # block 2
    x4 = relu.(conv4(w[5], x3; padding=1) .+ w[6])
    x5 = relu.(conv4(w[7], x4; padding=1) .+ w[8])
    x6 = pool(x5; window=2)
    # block 3
    x7 = relu.(conv4(w[9], x6; padding=1) .+ w[10])
    x8 = relu.(conv4(w[11], x7; padding=1) .+ w[12])
    x9 = relu.(conv4(w[13], x8; padding=1) .+ w[14])
    x10 = pool(x9; window=2)
    # block 4
    x11 = relu.(conv4(w[15], x10; padding=1) .+ w[16])
    x12 = relu.(conv4(w[17], x11; padding=1) .+ w[18])
    x13 = relu.(conv4(w[19], x12; padding=1) .+ w[20])
    x14 = pool(x13; window=2)
    # block 5
    x15 = relu.(conv4(w[21], x14; padding=1) .+ w[22])
    x16 = relu.(conv4(w[23], x15; padding=1) .+ w[24])
    x17 = relu.(conv4(w[25], x16; padding=1) .+ w[26])
    x18 = pool(x17; window=2)   # 7x7x512xN
    #x18 = dropout(x18, 0.5)
    # fc6 is stored as a 7x7 convolution; with no padding it collapses the
    # 7x7x512 map to 1x1x4096, mat flattens that to 4096xN, and vec makes
    # the bias broadcast along columns (padding here would change the
    # flattened size and break the broadcast)
    temp = mat(conv4(w[27], x18))
    x19 = relu.(temp .+ vec(w[28]))
    #x19 = dropout(x19, 0.5)
    # custom final layer from softpro.mat in place of fc7/fc8
    x20 = relu.(w[29] * x19 .+ w[30])
    return x20
end
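Since the question is about training, here is a minimal training-step sketch in the classic grad-based Knet style (the pattern from Knet's mnist example); the names data, x, ygold and the learning rate are placeholders of my own:

loss(w, x, ygold) = nll(predict(w, x), ygold)   # nll applies logsoftmax internally
lossgradient = grad(loss)                       # AutoGrad, re-exported by Knet

function train!(w, data; lr=0.01)
    for (x, ygold) in data      # x: 224x224x3xN KnetArray, ygold: integer labels
        g = lossgradient(w, x, ygold)
        for j in 1:length(w)    # plain SGD step
            w[j] = w[j] - lr * g[j]
        end
    end
    return w
end

Since nll already applies the softmax, it may be worth dropping the final relu in predict and returning raw scores for training. A quick smoke test (random input; real images need the mean subtraction stored under meta["normalization"]):

x = KnetArray(rand(Float32, 224, 224, 3, 8))
scores = predict(w, x)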