박하늘 / stock_chatbot
Authored by ZuseongZIN on 2021-06-03 23:06:48 +0900
Commit bab72821ac9cb9c60f29534c79ae7108f3232268 (bab72821)
1 parent: 8f964b4e
feature add: backtest code (added backtest code)
Showing 3 changed files with 1253 additions and 16 deletions:
pyfiles/optimizer.py
pyfiles/stockcodename.csv
pyfiles/workspace.ipynb
pyfiles/optimizer.py (view file @ bab7282)
...
...
@@ -4,6 +4,17 @@ import numpy as np
import FinanceDataReader as fdr
from scipy.optimize import minimize
import json
from datetime import date
import math
import itertools as it
import operator
from datetime import datetime
from scipy import stats
from scipy.stats import norm
from dateutil import rrule
from calendar import monthrange
from dateutil.relativedelta import relativedelta
from ast import literal_eval

# decimal display format
pd.options.display.float_format = '{:.3f}'.format
...
...
@@ -100,4 +111,472 @@ class c_Models:
        rp = minimize(RP_objective, w0, constraints=constraints, bounds = bd, method='SLSQP')
        result = dict(zip(self.asset_name, np.round(rp.x,3)))
-        return result #, RC(self.cov, rp.x)
\ No newline at end of file
+        return result #, RC(self.cov, rp.x)

    def plotting(self):
        wt_gmv = np.asarray(list(self.gmv_opt().values()))
        wt_ms = np.asarray(list(self.ms_opt().values()))
        wt_rp = np.asarray(list(self.rp_opt().values()))

        ret_gmv = np.dot(wt_gmv, self.mu)
        ret_ms = np.dot(wt_ms, self.mu)
        ret_rp = np.dot(wt_rp, self.mu)
        vol_gmv = np.sqrt(np.dot(wt_gmv.T, np.dot(self.cov, wt_gmv)))
        vol_ms = np.sqrt(np.dot(wt_ms.T, np.dot(self.cov, wt_ms)))
        vol_rp = np.sqrt(np.dot(wt_rp.T, np.dot(self.cov, wt_rp)))

        wt_gmv = wt_gmv.tolist()
        wt_ms = wt_ms.tolist()
        wt_rp = wt_rp.tolist()

        user_ret = np.dot(self.assets_w, self.mu)
        user_risk = np.sqrt(np.dot(self.assets_w, np.dot(self.cov, self.assets_w)))

        weights = {'gmv': wt_gmv, "ms": wt_ms, "rp": wt_rp}

        #rec_rs = recommended_asset()

        trets = np.linspace(ret_gmv, max(self.mu), 30)  # cut into 30 target returns
        tvols = []

        efpoints = dict()
        for i, tret in enumerate(trets):  # find the minimum risk for each of these target returns
            n_assets = len(self.data.columns)
            w0 = np.ones(n_assets) / n_assets
            fun = lambda w: np.dot(w.T, np.dot(self.cov, w))
            constraints = [{'type': 'eq', 'fun': lambda x: np.sum(x) - 1},
                           {'type': 'ineq', 'fun': lambda x: np.dot(x, self.mu) - tret}]
                           #{'type': 'ineq', 'fun': lambda x: x}]
            bd = ((0, 1),) * n_assets

            minvol = minimize(fun, w0, method='SLSQP', bounds=bd, constraints=constraints)
            tvols.append(np.sqrt(np.dot(minvol.x, np.dot(self.cov, minvol.x))))

            pnumber = '{}point'.format(i + 1)
            efpoints[pnumber] = minvol.x.tolist()

        if self.data.shape[0] <= 1:
            error = '기간에러'  # '기간에러' = "period error"
            return error, 1, 1
        else:
            ret_vol = {"GMV": [vol_gmv, ret_gmv], "MaxSharp": [vol_ms, ret_ms], "RiskParity": [vol_rp, ret_rp],
                       "Trets": trets.tolist(), "Tvols": tvols, "User": [user_risk, user_ret]}  #, "Recommended" : rec_rs}
            return ret_vol, json.dumps(efpoints), json.dumps(weights)
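The loop in plotting() traces the efficient frontier by fixing a target return and minimizing portfolio variance with SLSQP. A minimal, self-contained sketch of one such frontier point, using made-up inputs (mu_demo and cov_demo are illustrative values, not taken from this repository):

import numpy as np
from scipy.optimize import minimize

# illustrative inputs, not from the repository
mu_demo = np.array([0.08, 0.12, 0.10])            # expected returns
cov_demo = np.array([[0.04, 0.01, 0.00],
                     [0.01, 0.09, 0.02],
                     [0.00, 0.02, 0.06]])         # covariance matrix
tret = 0.10                                       # target return for one frontier point

n = len(mu_demo)
w0 = np.ones(n) / n                               # start from equal weights
fun = lambda w: w @ cov_demo @ w                  # portfolio variance
cons = [{'type': 'eq',   'fun': lambda w: np.sum(w) - 1},        # fully invested
        {'type': 'ineq', 'fun': lambda w: w @ mu_demo - tret}]   # return >= target
res = minimize(fun, w0, method='SLSQP', bounds=((0, 1),) * n, constraints=cons)
print(res.x, np.sqrt(res.fun))                    # weights and volatility of this frontier point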
class back_test:
    # compute the annual return by multiplying the mean of the simple daily returns by 252 trading days
    def __init__(self):
        self.test = 0

    def Arithmetic_Mean_Annual(self, ret):
        month_return = np.mean(ret)
        return (month_return * 252)

    # drawdown: how far the investment can fall from its running peak during the period
    def dd(self, ret):
        cum_ret = (1 + ret).cumprod()
        max_drawdown = 0
        max_ret = 1
        dd_list = []
        c = 0
        for ix_ret in cum_ret.values:
            if max_ret < ix_ret:
                max_ret = ix_ret
            dd_list.append((ix_ret - max_ret) / max_ret)
            c = c + 1
        return dd_list

    # maximum drawdown: the largest fall from a running peak during the period
    def mdd(self, ret):
        cum_ret = (1 + ret).cumprod()
        max_drawdown = 0
        max_ret = 1
        for ix_ret in cum_ret.values:
            if max_drawdown > (ix_ret - max_ret) / max_ret:
                max_drawdown = (ix_ret - max_ret) / max_ret
            if max_ret < ix_ret:
                max_ret = ix_ret
        return abs(max_drawdown)

    # portfolio return minus the risk-free rate, divided by the portfolio's standard deviation:
    # a measure of how much return is earned per unit of risk
    def sharpe_ratio(self, ret, rf=0.008, num_of_date=252):
        return ((np.mean(ret - (rf / num_of_date))) / (np.std(ret))) * np.sqrt(num_of_date)

    # largest loss that can occur at the configured confidence level (95%)
    def value_at_risk(self, ret, para_or_hist="para", confidence_level=0.95):
        vol = np.std(ret)
        if para_or_hist == "para":
            VaR = np.mean(ret) - vol * norm.ppf(confidence_level)
        else:
            print('error')
        return VaR

    # fraction of the whole investment period with rising returns (ret > 0)
    def winning_rate(self, ret):
        var_winning_rate = np.sum(ret > 0) / len(ret)
        return var_winning_rate

    # average gain on up days divided by the average loss on down days
    def profit_loss_ratio(self, ret):
        if np.sum(ret > 0) == 0:
            var_profit_loss_ratio = 0
        elif np.sum(ret < 0) == 0:
            var_profit_loss_ratio = np.inf
        else:
            win_mean = np.mean(ret[ret > 0])
            loss_mean = np.mean(ret[ret < 0])
            var_profit_loss_ratio = win_mean / loss_mean
        return abs(var_profit_loss_ratio)
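A small sanity check of the metric helpers above on a tiny made-up return series (this assumes the back_test class defined above is in scope; the numbers are illustrative only):

import pandas as pd

bt = back_test()
ret = pd.Series([0.01, -0.02, 0.03, -0.01, 0.02])   # made-up daily returns

print(bt.dd(ret))            # running drawdown from the peak, one value per day
print(bt.mdd(ret))           # worst (maximum) drawdown over the period
print(bt.sharpe_ratio(ret))  # annualized Sharpe ratio with rf=0.008 and 252 trading days
print(bt.winning_rate(ret))  # share of up days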
    # data-aggregation code
    # written for now against five sample data sets
    # takes the selected asset names, their weights, and the investment period as inputs
    def backtest_data(self, assets, weight, start_data_1, end_data_1, start_amount, rebalancing_month, interval, opt_option):
        # asset names received as input
        a = assets
        stock_num = len(a)
        # rebalancing_month received as input
        rebal_month = int(rebalancing_month)
        # interval: the reporting data interval received as input

        # asset weights received as input
        b = list(map(float, weight))

        # from_period and to_period received as input
        stock_return = pd.date_range(start=start_data_1, end=end_data_1)
        stock_return = pd.DataFrame(stock_return)
        stock_return.columns = ['Date']

        stocks = pd.read_csv('stockcodename.csv', index_col=0)
        symbol = ''
        asset_name = assets[:]
        for k in range(len(assets)):
            for i in enumerate(stocks.Name):
                if i[1] == assets[k]:
                    assets[k] = (stocks.iloc[i[0]].Symbol)
                    break

        # from_period and to_period received as input
        stock_return = pd.date_range(start=start_data_1, end=end_data_1)
        stock_return = pd.DataFrame(stock_return)
        stock_return.columns = ['Date']

        for asset in assets:  #total_list:
            tmp = fdr.DataReader(asset, start_data_1, end_data_1)
            tmp.insert(1, "Date", tmp.index.copy(), True)
            tmp = tmp[['Date', 'Change']]
            tmp.columns = ['Date', asset]
            tmp = tmp.reset_index(drop=True)
            stock_return = pd.merge(stock_return, tmp, how='inner', on='Date')

        stock_return = stock_return.dropna(axis=0)

        #print(stock_return)
        if opt_option == 'basic':
            # build a dataframe of investment weights
            start_datetime = stock_return.iloc[0, 0]
            end_datetime = stock_return.iloc[-1, 0]
            diff_months_list = list(rrule.rrule(rrule.MONTHLY, dtstart=start_datetime, until=end_datetime))
            month_gap = len(diff_months_list)
            rebal_roof = month_gap // rebal_month
            rebal_weight = pd.DataFrame()

            for i in range(rebal_roof + 1):
                # take one rebalancing window from the data
                filtered_df = stock_return.loc[stock_return["Date"].between(start_datetime,
                                               start_datetime + relativedelta(months=rebal_month) + relativedelta(days=-1))]
                # cumulative return over the rebalancing window
                for j in range(stock_num):
                    filtered_df.iloc[:, j+1] = (1 + filtered_df.iloc[:, j+1]).cumprod()
                # multiply the cumulative return by the initial investment weights
                for j in range(stock_num):
                    filtered_df.iloc[:, j+1] = filtered_df.iloc[:, j+1] * float(b[j])
                # then compute each asset's share of the portfolio
                filtered_df['total_value'] = filtered_df.sum(axis=1)
                for j in range(stock_num):
                    filtered_df.iloc[:, j+1] = filtered_df.iloc[:, j+1] / filtered_df['total_value']

                rebal_weight = pd.concat([rebal_weight, filtered_df])
                start_datetime = start_datetime + relativedelta(months=rebal_month)

            #final_day = monthrange(start_datetime.year, start_datetime.month)

            stock_weight = rebal_weight.iloc[:, :-1]
            #print(stock_weight)
            '''
            stock_weight = stock_return.Date
            stock_weight = pd.DataFrame(stock_weight)
            c = 0
            for stockweight in b:
                stock_weight[a[c]] = float(stockweight)
                c = c + 1
            #print(stock_weight)
            '''
        else:
            # derive the nested rebalancing weight list from the portfolio-optimization code
            # 1. split the requested start ~ end dates into rebalancing windows
            opt_start_datetime = stock_return.iloc[0, 0]
            opt_end_datetime = stock_return.iloc[-1, 0]
            opt_diff_months_list = list(rrule.rrule(rrule.MONTHLY, dtstart=opt_start_datetime, until=opt_end_datetime))
            opt_month_gap = len(opt_diff_months_list)
            opt_rebal_roof = opt_month_gap // rebal_month
            opt_rebal_weight = pd.DataFrame()
            #opt_array = [[0]*stock_num]*(opt_rebal_roof+1)

            for i in range(opt_rebal_roof + 1):
                opt_df = stock_return.loc[stock_return["Date"].between(opt_start_datetime, opt_start_datetime + relativedelta(months=rebal_month) + relativedelta(days=-1))]
                # fetch this window's weights from the optimization code
                c_m = c_Models(a, b, opt_df.iat[0, 0] - relativedelta(months=3), opt_df.iat[-1, 0])
                ret_vol, efpoints, weights = c_m.plotting()
                weights = literal_eval(weights)
                weights = weights.get(opt_option)
                ##print(weights)
                # cumulative return over the rebalancing window
                for j in range(stock_num):
                    opt_df.iloc[:, j+1] = (1 + opt_df.iloc[:, j+1]).cumprod()
                # multiply the cumulative return by the initial investment weights
                for j in range(stock_num):
                    opt_df.iloc[:, j+1] = opt_df.iloc[:, j+1] * float(weights[j])
                # then compute each asset's share of the portfolio
                opt_df['total_value'] = opt_df.sum(axis=1)
                for j in range(stock_num):
                    opt_df.iloc[:, j+1] = opt_df.iloc[:, j+1] / opt_df['total_value']

                #print(opt_df)
                opt_rebal_weight = pd.concat([opt_rebal_weight, opt_df])
                opt_start_datetime = opt_start_datetime + relativedelta(months=rebal_month)
                # stop once rebalancing pushes the start date past the end date the user asked for
                if opt_start_datetime > stock_return.iloc[-1, 0]:  # when i reaches 100
                    break
            stock_weight = opt_rebal_weight.iloc[:, :-1]
        ##print(stock_weight)
        # build a single dataframe of returns multiplied by the investment weights
        pfo_return = stock_weight.Date
        pfo_return = pd.DataFrame(pfo_return)
        # align the dates of the weights and the returns
        #pfo_return = pfo_return[0:len(stock_weight)]
        pfo_return = pd.merge(pfo_return, stock_return, left_on='Date', right_on='Date', how='left')
        pfo_return['mean_return'] = 0
        ##print(pfo_return)
        for i in range(0, len(pfo_return)):
            return_result = list(pfo_return.iloc[i, 1:1+stock_num])
            return_weight = list(stock_weight.iloc[i, 1:1+stock_num])
            pfo_return.iloc[i, 1+stock_num] = np.dot(return_result, return_weight)
        #rint(pfo_return)
        pfo_return['acc_return'] = [x+1 for x in pfo_return['mean_return']]
        pfo_return['acc_return'] = list(it.accumulate(pfo_return['acc_return'], operator.mul))
        pfo_return['acc_return'] = [x-1 for x in pfo_return['acc_return']]
        pfo_return['final_balance'] = float(start_amount) + float(start_amount) * pfo_return['acc_return']
        pfo_return['Drawdown_list'] = back_test.dd(input, pfo_return['mean_return'])
        pfo_return = pfo_return.set_index('Date')
        #print(pfo_return)
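Inside each rebalancing window the code above lets the initial weights drift with each asset's cumulative return and then renormalizes by the window's total value. A minimal two-asset sketch of that drift step, with made-up returns:

import pandas as pd

rets = pd.DataFrame({'A': [0.10, 0.00], 'B': [0.00, 0.10]})   # made-up daily returns
w0 = [0.5, 0.5]                                               # weights at the rebalance date

values = (1 + rets).cumprod() * w0                  # value of each sleeve, starting from its weight
weights = values.div(values.sum(axis=1), axis=0)    # drifted weights after each day
print(weights)
# day 1: A has gained, so its weight rises above 0.5; day 2: B catches up and they are equal again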
        ### load and preprocess the benchmark data
        tiker_list = ['KS11', 'US500']
        bench_list = [fdr.DataReader(ticker, start_data_1, end_data_1)['Change'] for ticker in tiker_list]
        bench = pd.concat(bench_list, axis=1)
        bench.columns = ['KOSPI', 'S&P500']
        bench['KOSPI'] = bench['KOSPI'].fillna(0)
        bench['S&P500'] = bench['S&P500'].fillna(0)
        #bench = bench.dropna()

        # benchmark cumulative return and drawdown
        bench['KOSPI_acc'] = [x+1 for x in bench['KOSPI']]
        bench['KOSPI_acc'] = list(it.accumulate(bench['KOSPI_acc'], operator.mul))
        bench['KOSPI_acc'] = [x-1 for x in bench['KOSPI_acc']]
        bench['KOSPI_balance'] = float(start_amount) + float(start_amount) * bench['KOSPI_acc']
        bench['KOSPI_Drawdown'] = back_test.dd(input, bench['KOSPI'])
        bench['S&P500_acc'] = [x+1 for x in bench['S&P500']]
        bench['S&P500_acc'] = list(it.accumulate(bench['S&P500_acc'], operator.mul))
        bench['S&P500_acc'] = [x-1 for x in bench['S&P500_acc']]
        bench['S&P500_balance'] = float(start_amount) + float(start_amount) * bench['S&P500_acc']
        bench['S&P500_Drawdown'] = back_test.dd(input, bench['S&P500'])
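The cumulative-return columns here are built by running itertools.accumulate over (1 + r) and subtracting 1 again; on a pandas Series that is equivalent to (1 + r).cumprod() - 1. A small sketch of the equivalence, with made-up numbers:

import itertools as it
import operator
import numpy as np
import pandas as pd

r = pd.Series([0.01, -0.02, 0.03])                        # made-up benchmark returns
acc_loop = [x - 1 for x in it.accumulate([x + 1 for x in r], operator.mul)]
acc_vec = (1 + r).cumprod() - 1
print(np.allclose(acc_loop, acc_vec))                     # True

The vectorized cumprod form is the more idiomatic pandas spelling; the diff keeps the accumulate version as written.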
        if interval == 'monthly' or interval == 'weekly':
            if interval == 'monthly':
                inter = 'M'
            if interval == 'weekly':
                inter = 'W'
            pfo_return_interval = pfo_return.resample(inter).last()
            pfo_return_first = pd.DataFrame(pfo_return.iloc[0]).transpose()
            pfo_return_interval = pd.concat([pfo_return_first, pfo_return_interval])
            pfo_return_interval['mean_return'] = pfo_return_interval['final_balance'].pct_change()
            pfo_return_interval = pfo_return_interval.dropna()

            # resample to the requested interval; only return and value are produced monthly here, the rest stay daily
            bench_interval = bench.resample(inter).last()
            #bench_ex['KOSPI'] = bench_ex['final_balance'].pct_change()
            bench_first = pd.DataFrame(bench.iloc[0]).transpose()
            bench_interval = pd.concat([bench_first, bench_interval])
            bench_interval['KOSPI'] = bench_interval['KOSPI_balance'].pct_change()
            bench_interval['S&P500'] = bench_interval['S&P500_balance'].pct_change()
            bench_interval = bench_interval.dropna()

            # turn the date index into a column and convert it to str
            pfo_return = pfo_return.rename_axis('Date').reset_index()
            pfo_return['Date'] = pd.to_datetime(pfo_return['Date'], format='%d/%m/%Y').dt.date
            pfo_return['Date'] = list(map(str, pfo_return['Date']))

            pfo_return_interval = pfo_return_interval.rename_axis('Date').reset_index()
            pfo_return_interval['Date'] = pd.to_datetime(pfo_return_interval['Date'], format='%d/%m/%Y').dt.date
            pfo_return_interval['Date'] = list(map(str, pfo_return_interval['Date']))

            bench = bench.rename_axis('Date').reset_index()
            bench['Date'] = pd.to_datetime(bench['Date'], format='%d/%m/%Y').dt.date
            bench['Date'] = list(map(str, bench['Date']))

            bench_interval = bench_interval.rename_axis('Date').reset_index()
            bench_interval['Date'] = pd.to_datetime(bench_interval['Date'], format='%d/%m/%Y').dt.date
            bench_interval['Date'] = list(map(str, bench_interval['Date']))

            backtest_return = {
                'pfo_return': [
                    {
                        'Date': list(pfo_return_interval['Date']),
                        'mean_return': list(pfo_return_interval['mean_return']),
                        'acc_return ratio': list(pfo_return_interval['acc_return']),
                        'final_balance': list(pfo_return_interval['final_balance']),
                        'Drawdown_list': list(pfo_return_interval['Drawdown_list'])
                    }
                ],
                'bench': [
                    {
                        'Date': list(bench_interval['Date']),
                        'KOSPI_return': list(bench_interval['KOSPI']),
                        'S&P500_return': list(bench_interval['S&P500']),
                        'KOSPI_acc_return': list(bench_interval['KOSPI_acc']),
                        'KOSPI_balance': list(bench_interval['KOSPI_balance']),
                        'KOSPI_Drawdown': list(bench_interval['KOSPI_Drawdown']),
                        'S&P500_acc_return': list(bench_interval['S&P500_acc']),
                        'S&P500_balance': list(bench_interval['S&P500_balance']),
                        'S&P500_Drawdown': list(bench_interval['S&P500_Drawdown'])
                    }
                ],
                'indicator': [
                    {
                        'Mean': back_test.Arithmetic_Mean_Annual(input, pfo_return['mean_return']),
                        'Std': pfo_return['mean_return'].std() * np.sqrt(365),
                        'Sharpe ratio': back_test.sharpe_ratio(input, pfo_return['mean_return']),
                        'VaR': back_test.value_at_risk(input, pfo_return['mean_return']),
                        'MDD': back_test.mdd(input, pfo_return['mean_return']),
                        'Winning ratio': back_test.winning_rate(input, pfo_return['mean_return']),
                        'Gain/Loss Ratio': back_test.profit_loss_ratio(input, pfo_return['mean_return'])
                    }
                ],
                'KOSPI_indicator': [
                    {
                        'Mean': back_test.Arithmetic_Mean_Annual(input, bench['KOSPI']),
                        'Std': bench['KOSPI'].std() * np.sqrt(365),
                        'Sharpe ratio': back_test.sharpe_ratio(input, bench['KOSPI']),
                        'VaR': back_test.value_at_risk(input, bench['KOSPI']),
                        'MDD': back_test.mdd(input, bench['KOSPI']),
                        'Winning ratio': back_test.winning_rate(input, bench['KOSPI']),
                        'Gain/Loss Ratio': back_test.profit_loss_ratio(input, bench['KOSPI'])
                    }
                ],
                'S&P500_indicator': [
                    {
                        'Mean': back_test.Arithmetic_Mean_Annual(input, bench['S&P500']),
                        'Std': bench['S&P500'].std() * np.sqrt(365),
                        'Sharpe ratio': back_test.sharpe_ratio(input, bench['S&P500']),
                        'VaR': back_test.value_at_risk(input, bench['S&P500']),
                        'MDD': back_test.mdd(input, bench['S&P500']),
                        'Winning ratio': back_test.winning_rate(input, bench['S&P500']),
                        'Gain/Loss Ratio': back_test.profit_loss_ratio(input, bench['S&P500'])
                    }
                ]
            }
        else:
            # turn the date index into a column and convert it to str
            pfo_return = pfo_return.rename_axis('Date').reset_index()
            pfo_return['Date'] = pd.to_datetime(pfo_return['Date'], format='%d/%m/%Y').dt.date
            pfo_return['Date'] = list(map(str, pfo_return['Date']))

            bench = bench.rename_axis('Date').reset_index()
            bench['Date'] = pd.to_datetime(bench['Date'], format='%d/%m/%Y').dt.date
            bench['Date'] = list(map(str, bench['Date']))
            backtest_return = {
                'pfo_return': [
                    {
                        'Date': list(pfo_return['Date']),
                        'mean_return': list(pfo_return['mean_return']),
                        'acc_return ratio': list(pfo_return['acc_return']),
                        'final_balance': list(pfo_return['final_balance']),
                        'Drawdown_list': list(pfo_return['Drawdown_list'])
                    }
                ],
                'bench': [
                    {
                        'Date': list(bench['Date']),
                        'KOSPI_return': list(bench['KOSPI']),
                        'S&P500_return': list(bench['S&P500']),
                        'KOSPI_acc_return': list(bench['KOSPI_acc']),
                        'KOSPI_balance': list(bench['KOSPI_balance']),
                        'KOSPI_Drawdown': list(bench['KOSPI_Drawdown']),
                        'S&P500_acc_return': list(bench['S&P500_acc']),
                        'S&P500_balance': list(bench['S&P500_balance']),
                        'S&P500_Drawdown': list(bench['S&P500_Drawdown'])
                    }
                ],
                'indicator': [
                    {
                        'Mean': back_test.Arithmetic_Mean_Annual(input, pfo_return['mean_return']),
                        'Std': pfo_return['mean_return'].std() * np.sqrt(365),
                        'Sharpe ratio': back_test.sharpe_ratio(input, pfo_return['mean_return']),
                        'VaR': back_test.value_at_risk(input, pfo_return['mean_return']),
                        'MDD': back_test.mdd(input, pfo_return['mean_return']),
                        'Winning ratio': back_test.winning_rate(input, pfo_return['mean_return']),
                        'Gain/Loss Ratio': back_test.profit_loss_ratio(input, pfo_return['mean_return'])
                    }
                ],
                'KOSPI_indicator': [
                    {
                        'Mean': back_test.Arithmetic_Mean_Annual(input, bench['KOSPI']),
                        'Std': bench['KOSPI'].std() * np.sqrt(365),
                        'Sharpe ratio': back_test.sharpe_ratio(input, bench['KOSPI']),
                        'VaR': back_test.value_at_risk(input, bench['KOSPI']),
                        'MDD': back_test.mdd(input, bench['KOSPI']),
                        'Winning ratio': back_test.winning_rate(input, bench['KOSPI']),
                        'Gain/Loss Ratio': back_test.profit_loss_ratio(input, bench['KOSPI'])
                    }
                ],
                'S&P500_indicator': [
                    {
                        'Mean': back_test.Arithmetic_Mean_Annual(input, bench['S&P500']),
                        'Std': bench['S&P500'].std() * np.sqrt(365),
                        'Sharpe ratio': back_test.sharpe_ratio(input, bench['S&P500']),
                        'VaR': back_test.value_at_risk(input, bench['S&P500']),
                        'MDD': back_test.mdd(input, bench['S&P500']),
                        'Winning ratio': back_test.winning_rate(input, bench['S&P500']),
                        'Gain/Loss Ratio': back_test.profit_loss_ratio(input, bench['S&P500'])
                    }
                ]
            }
        return backtest_return

print(back_test().backtest_data(['삼성전자','LG전자'],[0.9,0.1],'2010-01-01','2021-01-01',10000000,3,'monthly','gmv')['pfo_return'].mean_return)
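The print(...) call above exercises the new backtest end to end; note that backtest_return['pfo_return'] is a list containing one dict, so the trailing .mean_return attribute access in that committed line would raise an AttributeError. A minimal sketch of reading the summary metrics out of the returned dict instead, reusing the same example assets and parameters (the 'basic' option is used here so no optimizer call is needed; this still assumes FinanceDataReader can fetch the data):

result = back_test().backtest_data(['삼성전자', 'LG전자'], [0.9, 0.1],
                                   '2010-01-01', '2021-01-01',
                                   10000000, 3, 'monthly', 'basic')
indicators = result['indicator'][0]            # portfolio-level metrics
print(indicators['Sharpe ratio'], indicators['MDD'], indicators['VaR'])
for name in ('KOSPI_indicator', 'S&P500_indicator'):
    print(name, result[name][0]['Mean'])       # annualized mean return of each benchmark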
...
...
pyfiles/stockcodename.csv (view file @ bab7282)
This diff could not be displayed because it is too large.
pyfiles/workspace.ipynb (view file @ bab7282)
...
...
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 2,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
...
...
@@ -13,7 +13,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 41,
+  "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
...
...
@@ -42,14 +42,14 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 42,
+  "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
-     "{'현재가': 79700, '거래랑': 13358073, '전일 대비 수익률:': -0.004993757802746579}\n"
+     "{'현재가': 82800, '거래랑': 29341312, '전일 대비 수익률:': 0.024752475247524774}\n"
     ]
    }
   ],
...
...
@@ -59,7 +59,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 6,
+  "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
...
...
@@ -69,7 +69,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 123,
+  "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
...
...
@@ -79,6 +79,17 @@
    "import FinanceDataReader as fdr\n",
    "from scipy.optimize import minimize\n",
    "import json\n",
    "from datetime import date\n",
    "import math\n",
    "import itertools as it\n",
    "import operator\n",
    "from datetime import datetime\n",
    "from scipy import stats\n",
    "from scipy.stats import norm\n",
    "from dateutil import rrule\n",
    "from calendar import monthrange\n",
    "from dateutil.relativedelta import relativedelta\n",
    "from ast import literal_eval\n",
    "\n",
    "#소숫점 표현\n",
    "pd.options.display.float_format = '{:.3f}'.format\n",
...
...
@@ -175,33 +186,567 @@
    "\n",
    "        rp = minimize(RP_objective, w0, constraints=constraints, bounds = bd, method='SLSQP')\n",
    "        result = dict(zip(self.asset_name, np.round(rp.x,3)))\n",
-   "        return result #, RC(self.cov, rp.x)"
+   "        return result #, RC(self.cov, rp.x)\n",
+   [the remaining added source lines of this cell are the plotting() method, identical to the code added to pyfiles/optimizer.py above]
   ]
  },
 {
  "cell_type": "code",
- "execution_count": 122,
+ "execution_count": 8,
  "metadata": {},
  "outputs": [],
  "source": [
   [the cell's added source is the back_test class (metric helpers plus backtest_data), identical to the code added to pyfiles/optimizer.py above]
  ]
 },
 {
  "cell_type": "code",
  "execution_count": 5,
  "metadata": {
   "scrolled": true
  },
  "outputs": [
   {
    "data": {
     "text/plain": [
-     "{'삼성전자': 0.727, 'LG전자': 0.0, '카카오': 0.273}"
+     "({'GMV': [0.1858389981988727, 0.21203948231342723],\n",
+     " 'MaxSharp': [0.18671958740979067, 0.2139596912413942],\n",
+     " 'RiskParity': [0.20795176890404793, 0.21559809699947152],\n",
+     " 'Trets': [0.21203948231342723, ..., 0.23837895194799263],\n",    (30 target returns)
+     " 'Tvols': [0.1858389981987725, ..., 0.3088210356801474],\n",      (30 minimized volatilities)
+     " 'User': [0.25113519989524385, 0.2169925302290805]},\n",
+     " '{\"1point\": [0.7270000003851871, 0.0, 0.272999999614813], ..., \"30point\": [0.0, 3.533653586407226e-08, 0.9999999646634645]}',\n",    (weights at each of the 30 frontier points)
+     " '{\"gmv\": [0.727, 0.0, 0.273], \"ms\": [0.674, 0.0, 0.326], \"rp\": [0.443, 0.238, 0.319]}')"
     ]
    },
-   "execution_count": 122,
+   "execution_count": 5,
    "metadata": {},
    "output_type": "execute_result"
   }
  ],
  "source": [
   "#gmv portfolio -> what percentage of each asset to hold so that risk is lowest\n",
-  "c_Models(['삼성전자','LG전자','카카오'],[0,0,0],'2015-01-01','2021-04-01').gmv_opt()"
+  "c_Models(['삼성전자','LG전자','카카오'],[0.2,0.5,0.3],'2015-01-01','2021-04-01').plotting()"
  ]
 },
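The Trets and Tvols lists in this output are the 30 target returns and their minimized volatilities, i.e. the efficient frontier traced by plotting(). One way they could be visualized, assuming matplotlib is available (it is not used anywhere in this diff) and the c_Models class from pyfiles/optimizer.py is in scope:

import matplotlib.pyplot as plt

ret_vol, efpoints, weights = c_Models(['삼성전자', 'LG전자', '카카오'],
                                      [0.2, 0.5, 0.3],
                                      '2015-01-01', '2021-04-01').plotting()
plt.plot(ret_vol['Tvols'], ret_vol['Trets'], label='efficient frontier')
plt.scatter(*ret_vol['GMV'], label='GMV')              # [volatility, return] pairs
plt.scatter(*ret_vol['User'], label='user portfolio')
plt.xlabel('volatility')
plt.ylabel('expected return')
plt.legend()
plt.show()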
 {
  "cell_type": "code",
- "execution_count": 124,
+ "execution_count": 6,
  "metadata": {},
  "outputs": [
   {
...
...
@@ -210,7 +755,7 @@
      "{'삼성전자': 0.674, 'LG전자': 0.0, '카카오': 0.326}"
     ]
    },
-   "execution_count": 124,
+   "execution_count": 6,
    "metadata": {},
    "output_type": "execute_result"
   }
...
...
@@ -222,7 +767,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 125,
+ "execution_count": 39,
  "metadata": {},
  "outputs": [
   {
...
...
@@ -231,7 +776,7 @@
      "{'삼성전자': 0.443, 'LG전자': 0.238, '카카오': 0.319}"
     ]
    },
-   "execution_count": 125,
+   "execution_count": 39,
    "metadata": {},
    "output_type": "execute_result"
   }
...
...
@@ -240,6 +785,219 @@
    "#risk parity -> make each asset's risk contribution to the portfolio equal, i.e. the weights that give Samsung Electronics, LG and Kakao a 0.33 risk contribution each\n",
    "c_Models(['삼성전자','LG전자','카카오'],[0,0,0],'2015-01-01','2021-04-01').rp_opt()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "#def backtest_data(self,assets,weight,start_data_1, end_data_1,start_amount,rebalancing_month, interval, opt_option):\n",
    "back_test().backtest_data(['삼성전자','LG전자'],[0.9,0.1],'2010-01-01', '2021-01-01',10000000,3, 'monthly', 'basic')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 185,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/html": [HTML rendering of the same DataFrame shown in text/plain below],
      "text/plain": [
       "            Open   High    Low  Close    Volume  Change\n",
       "Date\n",
       "1997-06-02   1215   1222   1179   1190     74990     nan\n",
       "1997-06-03   1190   1195   1174   1176     71360  -0.012\n",
       "1997-06-04   1161   1197   1161   1197     85220   0.018\n",
       "1997-06-05   1193   1206   1181   1188     81890  -0.008\n",
       "1997-06-07   1197   1215   1190   1197     32550   0.008\n",
       "...           ...    ...    ...    ...       ...     ...\n",
       "2021-05-28  79800  80400  79400  80100  12360199   0.006\n",
       "2021-05-31  80300  80600  79600  80500  13321324   0.005\n",
       "2021-06-01  80500  81300  80100  80600  14058401   0.001\n",
       "2021-06-02  80400  81400  80300  80800  16414644   0.002\n",
       "2021-06-03  81300  83000  81100  82800  29341312   0.025\n",
       "\n",
       "[6000 rows x 6 columns]"
      ]
     },
     "execution_count": 185,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = fdr.DataReader('005930')\n",
    "df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 192,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "{\"gmv\": [0.727, 0.0, 0.273], \"ms\": [0.674, 0.0, 0.326], \"rp\": [0.443, 0.238, 0.319]}\n",
      "[0.674, 0.0, 0.326]\n"
     ]
    }
   ],
   "source": [
    "c_m = c_Models(['삼성전자','LG전자','카카오'],[0,0,0],'2015-01-01','2021-04-01')\n",
    "ret_vol, efpoints, weights = c_m.plotting()\n",
    "print(weights)\n",
    "weights = literal_eval(weights)\n",
    "weights = weights.get('ms')\n",
    "print(weights)"
   ]
  }
 ],
 "metadata": {
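The last cell round-trips the weights string produced by json.dumps through ast.literal_eval. Since the string is JSON, json.loads is the more direct inverse; a minimal sketch under that assumption (weights_json is a hypothetical name standing for the third value returned by plotting()):

import json

weights = json.loads(weights_json)   # weights_json: the JSON string returned by plotting()
print(weights['ms'])                 # e.g. [0.674, 0.0, 0.326] as in the output above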
...
...