* Implementation of GiNaC's indexed expressions. */
/*
- * GiNaC Copyright (C) 1999-2006 Johannes Gutenberg University Mainz, Germany
+ * GiNaC Copyright (C) 1999-2015 Johannes Gutenberg University Mainz, Germany
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include <iostream>
-#include <sstream>
-#include <stdexcept>
-
#include "indexed.h"
#include "idx.h"
#include "add.h"
#include "utils.h"
#include "integral.h"
#include "matrix.h"
+#include "inifcns.h"
+
+#include <iostream>
+#include <limits>
+#include <sstream>
+#include <stdexcept>
namespace GiNaC {
indexed::indexed() : symtree(not_symmetric())
{
- tinfo_key = &indexed::tinfo_static;
}
//////////
indexed::indexed(const ex & b) : inherited(b), symtree(not_symmetric())
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const ex & i1) : inherited(b, i1), symtree(not_symmetric())
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const ex & i1, const ex & i2) : inherited(b, i1, i2), symtree(not_symmetric())
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3) : inherited(b, i1, i2, i3), symtree(not_symmetric())
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited(b, i1, i2, i3, i4), symtree(not_symmetric())
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2) : inherited(b, i1, i2), symtree(symm)
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3) : inherited(b, i1, i2, i3), symtree(symm)
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited(b, i1, i2, i3, i4), symtree(symm)
{
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const exvector & v) : inherited(b), symtree(not_symmetric())
{
seq.insert(seq.end(), v.begin(), v.end());
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const ex & b, const symmetry & symm, const exvector & v) : inherited(b), symtree(symm)
{
seq.insert(seq.end(), v.begin(), v.end());
- tinfo_key = &indexed::tinfo_static;
validate();
}
indexed::indexed(const symmetry & symm, const exprseq & es) : inherited(es), symtree(symm)
{
- tinfo_key = &indexed::tinfo_static;
}
indexed::indexed(const symmetry & symm, const exvector & v, bool discardable) : inherited(v, discardable), symtree(symm)
{
- tinfo_key = &indexed::tinfo_static;
}
indexed::indexed(const symmetry & symm, std::auto_ptr<exvector> vp) : inherited(vp), symtree(symm)
{
- tinfo_key = &indexed::tinfo_static;
}
//////////
// archiving
//////////
-indexed::indexed(const archive_node &n, lst &sym_lst) : inherited(n, sym_lst)
+void indexed::read_archive(const archive_node &n, lst &sym_lst)
{
+ inherited::read_archive(n, sym_lst);
if (!n.find_ex("symmetry", symtree, sym_lst)) {
// GiNaC versions <= 0.9.0 had an unsigned "symmetry" property
unsigned symm = 0;
const_cast<symmetry &>(ex_to<symmetry>(symtree)).validate(seq.size() - 1);
}
}
+GINAC_BIND_UNARCHIVER(indexed);
void indexed::archive(archive_node &n) const
{
n.add_ex("symmetry", symtree);
}
-DEFAULT_UNARCHIVE(indexed)
-
//////////
// functions overriding virtual functions from base classes
//////////
return f * thiscontainer(v);
}
- if(this->tinfo()==&indexed::tinfo_static && seq.size()==1)
+ if((typeid(*this) == typeid(indexed)) && seq.size()==1)
return base;
// Canonicalize indices according to the symmetry properties
exvector v = seq;
GINAC_ASSERT(is_exactly_a<symmetry>(symtree));
int sig = canonicalize(v.begin() + 1, ex_to<symmetry>(symtree));
- if (sig != INT_MAX) {
+ if (sig != std::numeric_limits<int>::max()) {
// Something has changed while sorting indices, more evaluations later
if (sig == 0)
return _ex0;
return ex_to<basic>(base).eval_indexed(*this);
}
+ex indexed::real_part() const
+{
+ if(op(0).info(info_flags::real))
+ return *this;
+ return real_part_function(*this).hold();
+}
+
+ex indexed::imag_part() const
+{
+ if(op(0).info(info_flags::real))
+ return 0;
+ return imag_part_function(*this).hold();
+}
+
ex indexed::thiscontainer(const exvector & v) const
{
return indexed(ex_to<symmetry>(symtree), v);
{
bool something_changed = false;
+ // Find dummy symbols that occur twice in the same indexed object.
+ exvector local_var_dummies;
+ local_var_dummies.reserve(e.nops()/2);
+ for (size_t i=1; i<e.nops(); ++i) {
+ if (!is_a<varidx>(e.op(i)))
+ continue;
+ for (size_t j=i+1; j<e.nops(); ++j) {
+ if (is_dummy_pair(e.op(i), e.op(j))) {
+ local_var_dummies.push_back(e.op(i));
+ for (exvector::iterator k = variant_dummy_indices.begin();
+ k!=variant_dummy_indices.end(); ++k) {
+ if (e.op(i).op(0) == k->op(0)) {
+ variant_dummy_indices.erase(k);
+ break;
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ // In the case where a dummy symbol occurs twice in the same indexed object
+ * we try all possibilities of raising/lowering and keep the least one in
+ // the sense of ex_is_less.
+ ex optimal_e = e;
+ size_t numpossibs = 1 << local_var_dummies.size();
+ for (size_t i=0; i<numpossibs; ++i) {
+ ex try_e = e;
+ for (size_t j=0; j<local_var_dummies.size(); ++j) {
+ exmap m;
+ if (1<<j & i) {
+ ex curr_idx = local_var_dummies[j];
+ ex curr_toggle = ex_to<varidx>(curr_idx).toggle_variance();
+ m[curr_idx] = curr_toggle;
+ m[curr_toggle] = curr_idx;
+ }
+ try_e = e.subs(m, subs_options::no_pattern);
+ }
+ if(ex_is_less()(try_e, optimal_e))
+ { optimal_e = try_e;
+ something_changed = true;
+ }
+ }
+ e = optimal_e;
+
+ if (!is_a<indexed>(e))
+ return true;
+
+ exvector seq = ex_to<indexed>(e).seq;
+
// If a dummy index is encountered for the first time in the
// product, pull it up, otherwise, pull it down
- exvector::const_iterator it2, it2start, it2end;
- for (it2start = ex_to<indexed>(e).seq.begin(), it2end = ex_to<indexed>(e).seq.end(), it2 = it2start + 1; it2 != it2end; ++it2) {
+ for (exvector::iterator it2 = seq.begin()+1, it2end = seq.end();
+ it2 != it2end; ++it2) {
if (!is_exactly_a<varidx>(*it2))
continue;
for (vit = variant_dummy_indices.begin(), vitend = variant_dummy_indices.end(); vit != vitend; ++vit) {
if (it2->op(0).is_equal(vit->op(0))) {
if (ex_to<varidx>(*it2).is_covariant()) {
- e = e.subs(lst(
- *it2 == ex_to<varidx>(*it2).toggle_variance(),
- ex_to<varidx>(*it2).toggle_variance() == *it2
- ), subs_options::no_pattern);
+ /*
+ * N.B. we don't want to use
+ *
+ * e = e.subs(lst(
+ * *it2 == ex_to<varidx>(*it2).toggle_variance(),
+ * ex_to<varidx>(*it2).toggle_variance() == *it2
+ * ), subs_options::no_pattern);
+ *
+ * since this can trigger non-trivial repositioning of indices,
+ * e.g. due to non-trivial symmetry properties of e, thus
+ * invalidating iterators
+ */
+ *it2 = ex_to<varidx>(*it2).toggle_variance();
something_changed = true;
- it2 = ex_to<indexed>(e).seq.begin() + (it2 - it2start);
- it2start = ex_to<indexed>(e).seq.begin();
- it2end = ex_to<indexed>(e).seq.end();
}
moved_indices.push_back(*vit);
variant_dummy_indices.erase(vit);
for (vit = moved_indices.begin(), vitend = moved_indices.end(); vit != vitend; ++vit) {
if (it2->op(0).is_equal(vit->op(0))) {
if (ex_to<varidx>(*it2).is_contravariant()) {
- e = e.subs(*it2 == ex_to<varidx>(*it2).toggle_variance(), subs_options::no_pattern);
+ *it2 = ex_to<varidx>(*it2).toggle_variance();
something_changed = true;
- it2 = ex_to<indexed>(e).seq.begin() + (it2 - it2start);
- it2start = ex_to<indexed>(e).seq.begin();
- it2end = ex_to<indexed>(e).seq.end();
}
goto next_index;
}
next_index: ;
}
+ if (something_changed)
+ e = ex_to<indexed>(e).thiscontainer(seq);
+
return something_changed;
}
// Perform contractions
bool something_changed = false;
+ bool has_nonsymmetric = false;
GINAC_ASSERT(v.size() > 1);
exvector::iterator it1, itend = v.end(), next_to_last = itend - 1;
for (it1 = v.begin(); it1 != next_to_last; it1++) {
continue;
bool first_noncommutative = (it1->return_type() != return_types::commutative);
+ bool first_nonsymmetric = ex_to<symmetry>(ex_to<indexed>(*it1).get_symmetry()).has_nonsymmetric();
// Indexed factor found, get free indices and look for contraction
// candidates
// Non-commutative products are always re-expanded to give
// eval_ncmul() the chance to re-order and canonicalize
// the product
+ bool is_a_product = (is_exactly_a<mul>(*it1) || is_exactly_a<ncmul>(*it1)) &&
+ (is_exactly_a<mul>(*it2) || is_exactly_a<ncmul>(*it2));
ex r = (non_commutative ? ex(ncmul(v, true)) : ex(mul(v)));
- return simplify_indexed(r, free_indices, dummy_indices, sp);
+
+ // If new expression is a product we can call this function again,
+ // otherwise we need to pass argument to simplify_indexed() to be expanded
+ if (is_a_product)
+ return simplify_indexed_product(r, free_indices, dummy_indices, sp);
+ else
+ return simplify_indexed(r, free_indices, dummy_indices, sp);
}
// Both objects may have new indices now or they might
something_changed = true;
goto try_again;
}
+ else if (!has_nonsymmetric &&
+ (first_nonsymmetric ||
+ ex_to<symmetry>(ex_to<indexed>(*it2).get_symmetry()).has_nonsymmetric())) {
+ has_nonsymmetric = true;
+ }
}
}
// The result should be symmetric with respect to exchange of dummy
// indices, so if the symmetrization vanishes, the whole expression is
// zero. This detects things like eps.i.j.k * p.j * p.k = 0.
- ex q = idx_symmetrization<idx>(r, local_dummy_indices);
- if (q.is_zero()) {
- free_indices.clear();
- return _ex0;
- }
- q = idx_symmetrization<varidx>(q, local_dummy_indices);
- if (q.is_zero()) {
- free_indices.clear();
- return _ex0;
- }
- q = idx_symmetrization<spinidx>(q, local_dummy_indices);
- if (q.is_zero()) {
- free_indices.clear();
- return _ex0;
+ if (has_nonsymmetric) {
+ ex q = idx_symmetrization<idx>(r, local_dummy_indices);
+ if (q.is_zero()) {
+ free_indices.clear();
+ return _ex0;
+ }
+ q = idx_symmetrization<varidx>(q, local_dummy_indices);
+ if (q.is_zero()) {
+ free_indices.clear();
+ return _ex0;
+ }
+ q = idx_symmetrization<spinidx>(q, local_dummy_indices);
+ if (q.is_zero()) {
+ free_indices.clear();
+ return _ex0;
+ }
}
// Dummy index renaming
else if (is_a<mul>(e) || is_a<ncmul>(e)) {
exvector dummies;
exvector free_indices;
- for (int i=0; i<e.nops(); ++i) {
+ for (std::size_t i = 0; i < e.nops(); ++i) {
exvector dummies_of_factor = get_all_dummy_indices_safely(e.op(i));
dummies.insert(dummies.end(), dummies_of_factor.begin(),
dummies_of_factor.end());
}
else if(is_a<add>(e)) {
exvector result;
- for(int i=0; i<e.nops(); ++i) {
+ for(std::size_t i = 0; i < e.nops(); ++i) {
exvector dummies_of_term = get_all_dummy_indices_safely(e.op(i));
sort(dummies_of_term.begin(), dummies_of_term.end());
exvector new_vec;
pointer_to_map_function_1arg<bool> fcn(expand_dummy_sum, subs_idx);
if (is_a<add>(e_expanded) || is_a<lst>(e_expanded) || is_a<matrix>(e_expanded)) {
return e_expanded.map(fcn);
- } else if (is_a<ncmul>(e_expanded) || is_a<mul>(e_expanded) || is_a<power>(e_expanded)) {
- exvector v = get_all_dummy_indices(e_expanded);
- exvector::const_iterator it = v.begin(), itend = v.end();
- while (it != itend) {
- varidx nu = ex_to<varidx>(*it);
- if (nu.is_dim_numeric()) {
- ex en = 0;
- for (int i=0; i < ex_to<numeric>(nu.get_dim()).to_int(); i++) {
- if (is_a<varidx>(nu) && !subs_idx) {
- en += e_expanded.subs(lst(nu == varidx(i, nu.get_dim(), true), nu.toggle_variance() == varidx(i, nu.get_dim())));
- } else {
- en += e_expanded.subs(lst(nu == idx(i, nu.get_dim()), nu.toggle_variance() == idx(i, nu.get_dim())));
- }
- }
- return expand_dummy_sum(en, subs_idx);
- }
- ++it;
- }
- return e;
- } else if (is_a<indexed>(e_expanded)) {
- exvector v = ex_to<indexed>(e_expanded).get_dummy_indices();
- exvector::const_iterator it = v.begin(), itend = v.end();
- while (it != itend) {
- varidx nu = ex_to<varidx>(*it);
- if (nu.is_dim_numeric()) {
+ } else if (is_a<ncmul>(e_expanded) || is_a<mul>(e_expanded) || is_a<power>(e_expanded) || is_a<indexed>(e_expanded)) {
+ exvector v;
+ if (is_a<indexed>(e_expanded))
+ v = ex_to<indexed>(e_expanded).get_dummy_indices();
+ else
+ v = get_all_dummy_indices(e_expanded);
+ ex result = e_expanded;
+ for(exvector::const_iterator it=v.begin(); it!=v.end(); ++it) {
+ ex nu = *it;
+ if (ex_to<idx>(nu).get_dim().info(info_flags::nonnegint)) {
+ int idim = ex_to<numeric>(ex_to<idx>(nu).get_dim()).to_int();
ex en = 0;
- for (int i=0; i < ex_to<numeric>(nu.get_dim()).to_int(); i++) {
- if (is_a<varidx>(nu) && !subs_idx) {
- en += e_expanded.subs(lst(nu == varidx(i, nu.get_dim(), true), nu.toggle_variance() == varidx(i, nu.get_dim())));
+ for (int i=0; i < idim; i++) {
+ if (subs_idx && is_a<varidx>(nu)) {
+ ex other = ex_to<varidx>(nu).toggle_variance();
+ en += result.subs(lst(
+ nu == idx(i, idim),
+ other == idx(i, idim)
+ ));
} else {
- en += e_expanded.subs(lst(nu == idx(i, nu.get_dim()), nu.toggle_variance() == idx(i, nu.get_dim())));
+ en += result.subs( nu.op(0) == i );
}
}
- return expand_dummy_sum(en, subs_idx);
- }
- ++it;
+ result = en;
+ }
}
- return e;
+ return result;
} else {
return e;
}