.. code:: python
%matplotlib inline
from mxnet import autograd, np, npx
from d2l import mxnet as d2l
npx.set_np()
theta = np.arange(0, 1, 0.001)
p = theta**9 * (1 - theta)**4.
d2l.plot(theta, p, 'theta', 'likelihood')
.. figure:: output_maximum-likelihood_907113_3_0.svg
.. raw:: html

.. raw:: html
.. code:: python
%matplotlib inline
import torch
from d2l import torch as d2l
theta = torch.arange(0, 1, 0.001)
p = theta**9 * (1 - theta)**4.
d2l.plot(theta, p, 'theta', 'likelihood')
.. figure:: output_maximum-likelihood_907113_6_0.svg
.. raw:: html

.. raw:: html
.. code:: python
%matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l
theta = tf.range(0, 1, 0.001)
p = theta**9 * (1 - theta)**4.
d2l.plot(theta, p, 'theta', 'likelihood')
.. figure:: output_maximum-likelihood_907113_9_0.svg
.. raw:: html

.. raw:: html
.. code:: python
# Set up our data: coin-flip counts (heads and tails).
n_H = 8675309
n_T = 25624

# Initialize our parameter: the head probability theta, starting at 0.5.
theta = np.array(0.5)
theta.attach_grad()

# Perform gradient descent on the negative log-likelihood.
# The tiny learning rate compensates for the huge gradient magnitude
# produced by the large counts n_H and n_T.
lr = 0.00000000001
for _ in range(10):
    with autograd.record():
        loss = -(n_H * np.log(theta) + n_T * np.log(1 - theta))
    loss.backward()
    theta -= lr * theta.grad

# Check output: learned theta vs. the closed-form MLE n_H / (n_H + n_T).
theta, n_H / (n_H + n_T)
.. parsed-literal::
:class: output
(array(0.50172704), 0.9970550284664874)
.. raw:: html

.. raw:: html
.. code:: python
# Set up our data: coin-flip counts (heads and tails).
n_H = 8675309
n_T = 25624

# Initialize our parameter: the head probability theta, starting at 0.5.
theta = torch.tensor(0.5, requires_grad=True)

# Perform gradient descent on the negative log-likelihood.
# The tiny learning rate compensates for the huge gradient magnitude
# produced by the large counts n_H and n_T.
lr = 0.00000000001
for _ in range(10):
    loss = -(n_H * torch.log(theta) + n_T * torch.log(1 - theta))
    loss.backward()
    with torch.no_grad():
        theta -= lr * theta.grad
    # Clear the accumulated gradient before the next iteration.
    theta.grad.zero_()

# Check output: learned theta vs. the closed-form MLE n_H / (n_H + n_T).
theta, n_H / (n_H + n_T)
.. parsed-literal::
:class: output
(tensor(0.5017, requires_grad=True), 0.9970550284664874)
.. raw:: html

.. raw:: html
.. code:: python
# Set up our data: coin-flip counts (heads and tails).
n_H = 8675309
n_T = 25624

# Initialize our parameter: the head probability theta, starting at 0.5.
theta = tf.Variable(tf.constant(0.5))

# Perform gradient descent on the negative log-likelihood.
# The tiny learning rate compensates for the huge gradient magnitude
# produced by the large counts n_H and n_T.
lr = 0.00000000001
for _ in range(10):
    with tf.GradientTape() as t:
        loss = -(n_H * tf.math.log(theta) + n_T * tf.math.log(1 - theta))
    theta.assign_sub(lr * t.gradient(loss, theta))

# Check output: learned theta vs. the closed-form MLE n_H / (n_H + n_T).
theta, n_H / (n_H + n_T)
.. parsed-literal::
:class: output
(<tf.Variable 'Variable:0' shape=() dtype=float32, numpy=0.5017271>,
 0.9970550284664874)
.. raw:: html

.. raw:: html
`Discussions `__
.. raw:: html

.. raw:: html
`Discussions `__
.. raw:: html

.. raw:: html
`Discussions `__
.. raw:: html

.. raw:: html