
Commit

add a unit test for kxc
puzhichen committed Mar 3, 2025
1 parent e05d6ab commit c419b73
Showing 5 changed files with 55 additions and 56 deletions.
18 changes: 9 additions & 9 deletions gpu4pyscf/dft/tests/test_libxc.py
@@ -51,7 +51,7 @@ def _diff(dat, ref):
     return np.min((abs(d/(ref+1e-300)), abs(d)), axis=0)

 class KnownValues(unittest.TestCase):
-    def _check_xc(self, xc, spin=0, fxc_tol=1e-10, kxc_tol=1e-10):
+    def _check_xc(self, xc, spin=0, deriv=2, fxc_tol=1e-10, kxc_tol=1e-10):
         ni_cpu = numint_cpu()
         ni_gpu = numint_gpu()
         xctype = ni_cpu._xc_type(xc)
@@ -66,26 +66,26 @@ def _check_xc(self, xc, spin=0, fxc_tol=1e-10, kxc_tol=1e-10):
         if spin != 0:
             rho = (rho, rho)

-        exc_cpu, vxc_cpu, fxc_cpu, kxc_cpu = ni_cpu.eval_xc_eff(xc, rho, deriv=2, xctype=xctype)
-        exc_gpu, vxc_gpu, fxc_gpu, kxc_gpu = ni_gpu.eval_xc_eff(xc, cupy.array(rho), deriv=2, xctype=xctype)
+        exc_cpu, vxc_cpu, fxc_cpu, kxc_cpu = ni_cpu.eval_xc_eff(xc, rho, deriv=deriv, xctype=xctype)
+        exc_gpu, vxc_gpu, fxc_gpu, kxc_gpu = ni_gpu.eval_xc_eff(xc, cupy.array(rho), deriv=deriv, xctype=xctype)

         assert _diff(exc_gpu[:,0].get(), exc_cpu).max() < 1e-10
         assert _diff(vxc_gpu.get(), vxc_cpu).max() < 1e-10
         if fxc_gpu is not None:
             assert _diff(fxc_gpu.get(), fxc_cpu).max() < fxc_tol
-        if kxc_gpu is not None:
+        if deriv >= 3:
             assert _diff(kxc_gpu.get(), kxc_cpu).max() < kxc_tol

     def test_LDA(self):
-        self._check_xc('LDA_C_VWN')
+        self._check_xc('LDA_C_VWN', deriv=3)

     def test_GGA(self):
-        self._check_xc('HYB_GGA_XC_B3LYP')
-        self._check_xc('GGA_X_B88', fxc_tol=1e-10)
-        self._check_xc('GGA_C_PBE', fxc_tol=1e-4)
+        self._check_xc('HYB_GGA_XC_B3LYP', deriv=3, kxc_tol=1e-9)
+        self._check_xc('GGA_X_B88', deriv=3, fxc_tol=1e-10, kxc_tol=1e-9)
+        self._check_xc('GGA_C_PBE', deriv=3, fxc_tol=1e-4, kxc_tol=1e2)

     def test_mGGA(self):
-        self._check_xc('MGGA_C_M06', fxc_tol=1e-4)
+        self._check_xc('MGGA_C_M06', fxc_tol=1e-4, kxc_tol=1e-1)

     def test_u_LDA(self):
         self._check_xc('LDA_C_VWN', spin=1)
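The kxc comparison above is only meaningful because of the new deriv knob: eval_xc_eff fills its fourth return value only when third derivatives are requested. A minimal standalone sketch of the pattern the test exercises (the NumInt aliases mirror the test's imports; the random GGA density is an assumption, not code from this commit):

    import numpy as np
    import cupy
    from pyscf.dft.numint import NumInt as numint_cpu       # assumed alias, as in the test
    from gpu4pyscf.dft.numint import NumInt as numint_gpu   # assumed alias, as in the test

    rho = np.random.rand(4, 512)   # synthetic GGA density: rho plus three gradient components

    # deriv=3 requests exc, vxc, fxc and the third functional derivative kxc
    exc, vxc, fxc, kxc = numint_cpu().eval_xc_eff(
        'GGA_X_B88', rho, deriv=3, xctype='GGA')
    exc_g, vxc_g, fxc_g, kxc_g = numint_gpu().eval_xc_eff(
        'GGA_X_B88', cupy.array(rho), deriv=3, xctype='GGA')

    # kxc_g lives on the device; .get() copies it back for the comparison
    print(abs(kxc_g.get() - kxc).max())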
18 changes: 9 additions & 9 deletions gpu4pyscf/grad/tests/test_tdrhf_grad.py
@@ -105,7 +105,7 @@ def tearDownModule():


 def benchmark_with_cpu(mol, nstates=3, lindep=1.0e-12, tda=False):
-    mf = scf.RHF(mol).run()
+    mf = scf.RHF(mol).to_gpu().run()
     if tda:
         td = mf.TDA()
     else:
@@ -114,11 +114,11 @@ def benchmark_with_cpu(mol, nstates=3, lindep=1.0e-12, tda=False):
     td.nstates = nstates
     td.kernel()

-    tdgrad_cpu = pyscf.grad.tdrhf.Gradients(td)
+    td_cpu = td.to_cpu()
+    tdgrad_cpu = pyscf.grad.tdrhf.Gradients(td_cpu)
     tdgrad_cpu.kernel()

-    td_gpu = td.to_gpu()
-    tdgrad_gpu = gpu4pyscf.grad.tdrhf.Gradients(td_gpu)
+    tdgrad_gpu = gpu4pyscf.grad.tdrhf.Gradients(td)
     tdgrad_gpu.kernel()

     return tdgrad_cpu.de, tdgrad_gpu.de
@@ -188,15 +188,15 @@ def benchmark_with_finite_diff(
     return gradient_ana, grad


-def _check_grad(mol, tol=1e-6, disp=None, tda=False, method="cpu"):
+def _check_grad(mol, tol=1e-6, lindep=1.0E-12, disp=None, tda=False, method="cpu"):
     if method == "cpu":
         gradi_cpu, grad_gpu = benchmark_with_cpu(
-            mol, nstates=5, lindep=1.0e-12, tda=tda
+            mol, nstates=5, lindep=lindep, tda=tda
         )
         norm_diff = np.linalg.norm(gradi_cpu - grad_gpu)
     elif method == "numerical":
         grad_ana, grad = benchmark_with_finite_diff(
-            mol, delta=0.005, nstates=5, lindep=1.0e-12, tda=tda
+            mol, delta=0.005, nstates=5, lindep=lindep, tda=tda
         )
         norm_diff = np.linalg.norm(grad_ana - grad)
     assert norm_diff < tol
@@ -210,10 +210,10 @@ def test_grad_tda_singlet_numerical(self):
         _check_grad(mol, tol=1e-4, tda=True, method="numerical")

     def test_grad_tdhf_singlet_cpu(self):
-        _check_grad(mol, tol=1e-10, tda=False, method="cpu")
+        _check_grad(mol, tol=1e-10, lindep=1.0E-6, tda=False, method="cpu")

     def test_grad_tdhf_singlet_numerical(self):
-        _check_grad(mol, tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, tol=1e-4, lindep=1.0E-6, tda=False, method="numerical")


 if __name__ == "__main__":
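All four TD gradient tests in this commit move to the same flow: build and solve the excited states once on the GPU, take the gpu4pyscf gradient from that object directly, and obtain the PySCF reference by converting with to_cpu() rather than solving a second time on the host. A hedged sketch of that flow (the water geometry, basis, and choice of TDA are placeholders; only the call pattern comes from the diff):

    import numpy as np
    import pyscf.grad.tdrhf
    import gpu4pyscf.grad.tdrhf
    from pyscf import gto, scf

    mol = gto.M(atom='O 0 0 0; H 0 0 0.957; H 0.927 0 -0.240', basis='sto-3g')
    mf = scf.RHF(mol).to_gpu().run()   # SCF runs on the GPU from the start

    td = mf.TDA()
    td.nstates = 5
    td.kernel()                        # excited states solved once, on the GPU

    tdgrad_gpu = gpu4pyscf.grad.tdrhf.Gradients(td)
    tdgrad_gpu.kernel()

    td_cpu = td.to_cpu()               # reuse the converged GPU result on the host
    tdgrad_cpu = pyscf.grad.tdrhf.Gradients(td_cpu)
    tdgrad_cpu.kernel()

    print(np.linalg.norm(tdgrad_cpu.de - tdgrad_gpu.de))   # should be tiny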
30 changes: 15 additions & 15 deletions gpu4pyscf/grad/tests/test_tdrks_grad.py
@@ -104,7 +104,7 @@ def tearDownModule():


 def benchmark_with_cpu(mol, xc, nstates=3, lindep=1.0e-12, tda=False):
-    mf = dft.RKS(mol, xc=xc).run()
+    mf = dft.RKS(mol, xc=xc).to_gpu().run()
     if tda:
         td = mf.TDA()
     else:
@@ -113,11 +113,11 @@ def benchmark_with_cpu(mol, xc, nstates=3, lindep=1.0e-12, tda=False):
     td.nstates = nstates
     td.kernel()

-    tdgrad_cpu = pyscf.grad.tdrks.Gradients(td)
+    td_cpu = td.to_cpu()
+    tdgrad_cpu = pyscf.grad.tdrks.Gradients(td_cpu)
     tdgrad_cpu.kernel()

-    td_gpu = td.to_gpu()
-    tdgrad_gpu = gpu4pyscf.grad.tdrks.Gradients(td_gpu)
+    tdgrad_gpu = gpu4pyscf.grad.tdrks.Gradients(td)
     tdgrad_gpu.kernel()

     return tdgrad_cpu.de, tdgrad_gpu.de
@@ -191,14 +191,14 @@ def benchmark_with_finite_diff(
     return gradient_ana, grad


-def _check_grad(mol, xc, tol=1e-6, disp=None, tda=False, method="cpu"):
+def _check_grad(mol, xc, tol=1e-6, lindep=1.0e-12, disp=None, tda=False, method="cpu"):
     if method == "cpu":
         gradi_cpu, grad_gpu = benchmark_with_cpu(
-            mol, xc, nstates=5, lindep=1.0e-12, tda=tda)
+            mol, xc, nstates=5, lindep=lindep, tda=tda)
         norm_diff = np.linalg.norm(gradi_cpu - grad_gpu)
     elif method == "numerical":
         grad_ana, grad = benchmark_with_finite_diff(
-            mol, xc, delta=0.005, nstates=5, lindep=1.0e-12, tda=tda)
+            mol, xc, delta=0.005, nstates=5, lindep=lindep, tda=tda)
         norm_diff = np.linalg.norm(grad_ana - grad)
     assert norm_diff < tol

@@ -211,10 +211,10 @@ def test_grad_svwn_tda_singlet_numerical(self):
         _check_grad(mol, xc="svwn", tol=1e-4, tda=True, method="numerical")

     def test_grad_svwn_tddft_singlet_cpu(self):
-        _check_grad(mol, xc="svwn", tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, xc="svwn", tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_svwn_tddft_singlet_numerical(self):
-        _check_grad(mol, xc="svwn", tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, xc="svwn", tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")

     def test_grad_b3lyp_tda_singlet_cpu(self):
         _check_grad(mol, xc="b3lyp", tol=5e-10, tda=True, method="cpu")
@@ -223,10 +223,10 @@ def test_grad_b3lyp_tda_singlet_numerical(self):
         _check_grad(mol, xc="b3lyp", tol=1e-4, tda=True, method="numerical")

     def test_grad_b3lyp_tddft_singlet_cpu(self):
-        _check_grad(mol, xc="b3lyp", tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, xc="b3lyp", tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_b3lyp_tddft_singlet_numerical(self):
-        _check_grad(mol, xc="b3lyp", tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, xc="b3lyp", tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")

     def test_grad_camb3lyp_tda_singlet_cpu(self):
         _check_grad(mol, xc="camb3lyp", tol=5e-10, tda=True, method="cpu")
@@ -235,10 +235,10 @@ def test_grad_camb3lyp_tda_singlet_numerical(self):
         _check_grad(mol, xc="camb3lyp", tol=1e-4, tda=True, method="numerical")

     def test_grad_camb3lyp_tddft_singlet_cpu(self):
-        _check_grad(mol, xc="camb3lyp", tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, xc="camb3lyp", tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_camb3lyp_tddft_singlet_numerical(self):
-        _check_grad(mol, xc="camb3lyp", tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, xc="camb3lyp", tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")

     def test_grad_tpss_tda_singlet_cpu(self):
         _check_grad(mol, xc="tpss", tol=5e-10, tda=True, method="cpu")
@@ -247,10 +247,10 @@ def test_grad_tpss_tda_singlet_numerical(self):
         _check_grad(mol, xc="tpss", tol=1e-4, tda=True, method="numerical")

     def test_grad_tpss_tddft_singlet_cpu(self):
-        _check_grad(mol, xc="tpss", tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, xc="tpss", tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_tpss_tddft_singlet_numerical(self):
-        _check_grad(mol, xc="tpss", tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, xc="tpss", tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")


 if __name__ == "__main__":
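The looser lindep=1.0e-6 passed to the full-TDDFT cases is forwarded into the response solver, where it sets the cutoff for discarding nearly linearly dependent trial vectors in the Davidson subspace. The hidden body of benchmark_with_cpu presumably applies it along these lines (a sketch under that assumption; molecule and functional are placeholders):

    from pyscf import gto, dft

    mol = gto.M(atom='H 0 0 0; F 0 0 0.92', basis='sto-3g')
    td = dft.RKS(mol, xc='svwn').run().TDDFT()
    td.nstates = 5
    td.lindep = 1.0e-6   # looser than the 1e-12 default kept for the TDA cases
    td.kernel()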
18 changes: 9 additions & 9 deletions gpu4pyscf/grad/tests/test_tduhf_grad.py
@@ -138,7 +138,7 @@ def tearDownModule():


 def benchmark_with_cpu(mol, nstates=3, lindep=1.0e-12, tda=False):
-    mf = scf.UHF(mol).run()
+    mf = scf.UHF(mol).to_gpu().run()
     if tda:
         td = mf.TDA()
     else:
@@ -147,11 +147,11 @@ def benchmark_with_cpu(mol, nstates=3, lindep=1.0e-12, tda=False):
     td.nstates = nstates
     td.kernel()

-    tdgrad_cpu = pyscf.grad.tduhf.Gradients(td)
+    td_cpu = td.to_cpu()
+    tdgrad_cpu = pyscf.grad.tduhf.Gradients(td_cpu)
     tdgrad_cpu.kernel()

-    td_gpu = td.to_gpu()
-    tdgrad_gpu = gpu4pyscf.grad.tduhf.Gradients(td_gpu)
+    tdgrad_gpu = gpu4pyscf.grad.tduhf.Gradients(td)
     tdgrad_gpu.kernel()

     return tdgrad_cpu.de, tdgrad_gpu.de
@@ -226,14 +226,14 @@ def benchmark_with_finite_diff(
     return gradient_ana, grad


-def _check_grad(mol, tol=1e-6, disp=None, tda=False, method="cpu"):
+def _check_grad(mol, tol=1e-6, lindep=1.0e-12, disp=None, tda=False, method="cpu"):
     if method == "cpu":
         gradi_cpu, grad_gpu = benchmark_with_cpu(
-            mol, nstates=5, lindep=1.0e-12, tda=tda)
+            mol, nstates=5, lindep=lindep, tda=tda)
         norm_diff = np.linalg.norm(gradi_cpu - grad_gpu)
     elif method == "numerical":
         grad_ana, grad = benchmark_with_finite_diff(
-            mol, delta=0.005, nstates=5, lindep=1.0e-12, tda=tda)
+            mol, delta=0.005, nstates=5, lindep=lindep, tda=tda)
         norm_diff = np.linalg.norm(grad_ana - grad)
     assert norm_diff < tol

@@ -246,10 +246,10 @@ def test_grad_tda_spinconserve_numerical(self):
         _check_grad(mol, tol=1e-4, tda=True, method="numerical")

     def test_grad_tdhf_spinconserve_cpu(self):
-        _check_grad(mol, tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_tdhf_spinconserve_numerical(self):
-        _check_grad(mol, tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")


 if __name__ == "__main__":
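The "numerical" branch used by these tests cross-checks the analytic gradient against central finite differences with delta=0.005. benchmark_with_finite_diff's body is not shown in this diff; a generic sketch of the construction, with energy_fn standing in for a full TD-SCF energy evaluation at displaced coordinates:

    import numpy as np

    def fd_gradient(energy_fn, coords, delta=0.005):
        """Central-difference gradient of a scalar energy over (natm, 3) coordinates."""
        grad = np.zeros_like(coords)
        for i in range(coords.shape[0]):
            for j in range(3):
                plus, minus = coords.copy(), coords.copy()
                plus[i, j] += delta
                minus[i, j] -= delta
                grad[i, j] = (energy_fn(plus) - energy_fn(minus)) / (2.0 * delta)
        return grad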
27 changes: 13 additions & 14 deletions gpu4pyscf/grad/tests/test_tduks_grad.py
@@ -138,8 +138,7 @@ def tearDownModule():


 def benchmark_with_cpu(mol, xc, nstates=3, lindep=1.0e-12, tda=False):
-    mf = dft.UKS(mol, xc=xc).run()
-    mf.kernel()
+    mf = dft.UKS(mol, xc=xc).to_gpu().run()
     if tda:
         td = mf.TDA()
     else:
@@ -148,11 +147,11 @@ def benchmark_with_cpu(mol, xc, nstates=3, lindep=1.0e-12, tda=False):
     td.nstates = nstates
     td.kernel()

-    tdgrad_cpu = pyscf.grad.tduks.Gradients(td)
+    td_cpu = td.to_cpu()
+    tdgrad_cpu = pyscf.grad.tduks.Gradients(td_cpu)
     tdgrad_cpu.kernel()

-    td_gpu = td.to_gpu()
-    tdgrad_gpu = gpu4pyscf.grad.tduks.Gradients(td_gpu)
+    tdgrad_gpu = gpu4pyscf.grad.tduks.Gradients(td)
     tdgrad_gpu.kernel()

     return tdgrad_cpu.de, tdgrad_gpu.de
@@ -232,14 +231,14 @@ def benchmark_with_finite_diff(
     return gradient_ana, grad


-def _check_grad(mol, xc, tol=1e-6, disp=None, tda=False, method="cpu"):
+def _check_grad(mol, xc, tol=1e-6, lindep=1.0e-12, disp=None, tda=False, method="cpu"):
     if method == "cpu":
         gradi_cpu, grad_gpu = benchmark_with_cpu(
-            mol, xc, nstates=5, lindep=1.0e-12, tda=tda)
+            mol, xc, nstates=5, lindep=lindep, tda=tda)
         norm_diff = np.linalg.norm(gradi_cpu - grad_gpu)
     elif method == "numerical":
         grad_ana, grad = benchmark_with_finite_diff(
-            mol, xc, delta=0.005, nstates=5, lindep=1.0e-12, tda=tda)
+            mol, xc, delta=0.005, nstates=5, lindep=lindep, tda=tda)
         norm_diff = np.linalg.norm(grad_ana - grad)
     assert norm_diff < tol

@@ -252,10 +251,10 @@ def test_grad_svwn_tda_spinconserving_numerical(self):
         _check_grad(mol, xc="svwn", tol=1e-4, tda=True, method="numerical")

     def test_grad_svwn_tddft_spinconserving_cpu(self):
-        _check_grad(mol, xc="svwn", tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, xc="svwn", tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_svwn_tddft_spinconserving_numerical(self):
-        _check_grad(mol, xc="svwn", tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, xc="svwn", tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")

     def test_grad_camb3lyp_tda_spinconserving_cpu(self):
         _check_grad(mol, xc="camb3lyp", tol=5e-10, tda=True, method="cpu")
@@ -264,10 +263,10 @@ def test_grad_camb3lyp_tda_spinconserving_numerical(self):
         _check_grad(mol, xc="camb3lyp", tol=1e-4, tda=True, method="numerical")

     def test_grad_camb3lyp_tddft_spinconserving_cpu(self):
-        _check_grad(mol, xc="camb3lyp", tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, xc="camb3lyp", tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_camb3lyp_tddft_spinconserving_numerical(self):
-        _check_grad(mol, xc="camb3lyp", tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, xc="camb3lyp", tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")

     def test_grad_tpss_tda_spinconserving_cpu(self):
         _check_grad(mol, xc="tpss", tol=5e-10, tda=True, method="cpu")
@@ -276,10 +275,10 @@ def test_grad_tpss_tda_spinconserving_numerical(self):
         _check_grad(mol, xc="tpss", tol=1e-4, tda=True, method="numerical")

     def test_grad_tpss_tddft_spinconserving_cpu(self):
-        _check_grad(mol, xc="tpss", tol=5e-10, tda=False, method="cpu")
+        _check_grad(mol, xc="tpss", tol=5e-10, lindep=1.0e-6, tda=False, method="cpu")

     def test_grad_tpss_tddft_spinconserving_numerical(self):
-        _check_grad(mol, xc="tpss", tol=1e-4, tda=False, method="numerical")
+        _check_grad(mol, xc="tpss", tol=1e-4, lindep=1.0e-6, tda=False, method="numerical")


 if __name__ == "__main__":
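Besides the shared GPU-first rewrite, this file drops a stray mf.kernel() that followed .run(): in PySCF, .run() already invokes kernel() and returns the converged object, so the old code converged the SCF twice. For example (placeholder molecule):

    from pyscf import gto, dft

    mol = gto.M(atom='O 0 0 0; H 0 0 0.957; H 0.927 0 -0.240', basis='sto-3g')
    mf = dft.UKS(mol, xc='svwn').run()   # run() calls kernel() internally
    print(mf.e_tot)                      # already converged; no second kernel() needed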
