/** @file indexed.cpp
 *
 *  Implementation of GiNaC's indexed expressions. */

/*
 *  GiNaC Copyright (C) 1999-2002 Johannes Gutenberg University Mainz, Germany
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
32 #include "relational.h"
41 GINAC_IMPLEMENT_REGISTERED_CLASS(indexed, exprseq)
// default ctor, dtor, copy ctor, assignment operator and helpers
47 indexed::indexed() : symtree(sy_none())
49 tinfo_key = TINFO_indexed;
52 void indexed::copy(const indexed & other)
54 inherited::copy(other);
55 symtree = other.symtree;
58 DEFAULT_DESTROY(indexed)
64 indexed::indexed(const ex & b) : inherited(b), symtree(sy_none())
66 tinfo_key = TINFO_indexed;
70 indexed::indexed(const ex & b, const ex & i1) : inherited(b, i1), symtree(sy_none())
72 tinfo_key = TINFO_indexed;
76 indexed::indexed(const ex & b, const ex & i1, const ex & i2) : inherited(b, i1, i2), symtree(sy_none())
78 tinfo_key = TINFO_indexed;
82 indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3) : inherited(b, i1, i2, i3), symtree(sy_none())
84 tinfo_key = TINFO_indexed;
88 indexed::indexed(const ex & b, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited(b, i1, i2, i3, i4), symtree(sy_none())
90 tinfo_key = TINFO_indexed;
94 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2) : inherited(b, i1, i2), symtree(symm)
96 tinfo_key = TINFO_indexed;
100 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3) : inherited(b, i1, i2, i3), symtree(symm)
102 tinfo_key = TINFO_indexed;
106 indexed::indexed(const ex & b, const symmetry & symm, const ex & i1, const ex & i2, const ex & i3, const ex & i4) : inherited(b, i1, i2, i3, i4), symtree(symm)
108 tinfo_key = TINFO_indexed;
112 indexed::indexed(const ex & b, const exvector & v) : inherited(b), symtree(sy_none())
114 seq.insert(seq.end(), v.begin(), v.end());
115 tinfo_key = TINFO_indexed;
119 indexed::indexed(const ex & b, const symmetry & symm, const exvector & v) : inherited(b), symtree(symm)
121 seq.insert(seq.end(), v.begin(), v.end());
122 tinfo_key = TINFO_indexed;
126 indexed::indexed(const symmetry & symm, const exprseq & es) : inherited(es), symtree(symm)
128 tinfo_key = TINFO_indexed;
131 indexed::indexed(const symmetry & symm, const exvector & v, bool discardable) : inherited(v, discardable), symtree(symm)
133 tinfo_key = TINFO_indexed;
136 indexed::indexed(const symmetry & symm, exvector * vp) : inherited(vp), symtree(symm)
138 tinfo_key = TINFO_indexed;
// Unarchiving constructor: restore the symmetry tree from the archive.
// NOTE(review): several lines are missing from this extraction — the
// opening brace, the declaration of the local 'symm' variable read below,
// the switch that converts the legacy unsigned symmetry value into a
// symmetry tree, and the closing braces.  Restore them from the upstream
// file before compiling.
indexed::indexed(const archive_node &n, const lst &sym_lst) : inherited(n, sym_lst)
	if (!n.find_ex("symmetry", symtree, sym_lst)) {
		// GiNaC versions <= 0.9.0 had an unsigned "symmetry" property
		n.find_unsigned("symmetry", symm);
	// Check that the symmetry tree is consistent with the number of indices
	// (seq also holds the base expression, hence the "- 1")
	const_cast<symmetry &>(ex_to<symmetry>(symtree)).validate(seq.size() - 1);
166 void indexed::archive(archive_node &n) const
168 inherited::archive(n);
169 n.add_ex("symmetry", symtree);
172 DEFAULT_UNARCHIVE(indexed)
// functions overriding virtual functions from base classes
// Print an indexed object.  In tree mode a debugging dump is produced;
// otherwise the base expression is printed (parenthesized if composite)
// followed by the indices.
// NOTE(review): the opening brace, the else-branch statements that
// actually print the base expression and parentheses, and the closing
// braces are missing from this extraction — restore from upstream.
void indexed::print(const print_context & c, unsigned level) const
	GINAC_ASSERT(seq.size() > 0);

	if (is_of_type(c, print_tree)) {

		// Debugging dump: class name, hash, flags, number of indices and
		// the symmetry tree, then base expression and indices indented
		c.s << std::string(level, ' ') << class_name()
		    << std::hex << ", hash=0x" << hashvalue << ", flags=0x" << flags << std::dec
		    << ", " << seq.size()-1 << " indices"
		    << ", symmetry=" << symtree << std::endl;
		unsigned delta_indent = static_cast<const print_tree &>(c).delta_indent;
		seq[0].print(c, level + delta_indent);
		printindices(c, level + delta_indent);

	// Normal/LaTeX output: decide whether the base needs parentheses
	// (it does if it is a sum, product, power or another indexed object)
	bool is_tex = is_of_type(c, print_latex);
	const ex & base = seq[0];
	bool need_parens = is_ex_exactly_of_type(base, add) || is_ex_exactly_of_type(base, mul)
	                || is_ex_exactly_of_type(base, ncmul) || is_ex_exactly_of_type(base, power)
	                || is_ex_of_type(base, indexed);
	printindices(c, level);
212 bool indexed::info(unsigned inf) const
214 if (inf == info_flags::indexed) return true;
215 if (inf == info_flags::has_indices) return seq.size() > 1;
216 return inherited::info(inf);
219 struct idx_is_not : public std::binary_function<ex, unsigned, bool> {
220 bool operator() (const ex & e, unsigned inf) const {
221 return !(ex_to<idx>(e).get_value().info(inf));
225 bool indexed::all_index_values_are(unsigned inf) const
227 // No indices? Then no property can be fulfilled
232 return find_if(seq.begin() + 1, seq.end(), bind2nd(idx_is_not(), inf)) == seq.end();
235 int indexed::compare_same_type(const basic & other) const
237 GINAC_ASSERT(is_a<indexed>(other));
238 return inherited::compare_same_type(other);
// Evaluate an indexed object: evaluate children, pull numeric factors out
// of a product base, canonicalize the index order according to the
// symmetry tree, then give the base object's class a chance to perform
// further evaluation via eval_indexed().
// NOTE(review): this extraction is missing the level guard around the
// evalchildren() recursion, the base.is_zero() shortcut, the declarations
// of the local exvector 'v' copies, and several braces — restore from the
// upstream file before compiling.
ex indexed::eval(int level) const
	// First evaluate children, then we will end up here again
	return indexed(ex_to<symmetry>(symtree), evalchildren(level));

	const ex &base = seq[0];

	// If the base object is 0, the whole object is 0

	// If the base object is a product, pull out the numeric factor
	if (is_ex_exactly_of_type(base, mul) && is_ex_exactly_of_type(base.op(base.nops() - 1), numeric)) {
		// f is the trailing numeric factor of the base product; v (missing
		// here) is the sequence with that factor stripped from the base
		ex f = ex_to<numeric>(base.op(base.nops() - 1));
		return f * thisexprseq(v);

	// Canonicalize indices according to the symmetry properties
	if (seq.size() > 2) {
		GINAC_ASSERT(is_exactly_a<symmetry>(symtree));
		// canonicalize() reorders the indices in place and returns the sign
		// of the permutation, or INT_MAX if nothing changed
		int sig = canonicalize(v.begin() + 1, ex_to<symmetry>(symtree));
		if (sig != INT_MAX) {
			// Something has changed while sorting indices, more evaluations later
			return ex(sig) * thisexprseq(v);

	// Let the class of the base object perform additional evaluations
	return ex_to<basic>(base).eval_indexed(*this);
278 ex indexed::thisexprseq(const exvector & v) const
280 return indexed(ex_to<symmetry>(symtree), v);
283 ex indexed::thisexprseq(exvector * vp) const
285 return indexed(ex_to<symmetry>(symtree), vp);
// Expand an indexed object.  With expand_options::expand_indexed an
// indexed sum is distributed over its terms: (a+b).i -> a.i + b.i.
// NOTE(review): the opening brace, the declaration of the accumulator
// 'sum', the construction of the per-term sequence 's' (seq with the base
// replaced by base.op(i)), the return of 'sum' and closing braces are
// missing from this extraction — restore from upstream.
ex indexed::expand(unsigned options) const
	GINAC_ASSERT(seq.size() > 0);

	if ((options & expand_options::expand_indexed) && is_ex_exactly_of_type(seq[0], add)) {

		// expand_indexed expands (a+b).i -> a.i + b.i
		const ex & base = seq[0];
		for (unsigned i=0; i<base.nops(); i++) {
			sum += thisexprseq(s).expand();

	return inherited::expand(options);
// virtual functions which can be overridden by derived classes

// non-virtual functions in this class
// Print only the index part of the object (seq[1] onwards).  In LaTeX
// mode, runs of indices with equal variance are grouped so that sub- and
// superscripts alternate correctly.
// NOTE(review): the opening brace, the declaration of the 'first' flag
// used below, the statements that emit the actual index text, the loop
// increments and closing braces are missing from this extraction —
// restore from upstream.
void indexed::printindices(const print_context & c, unsigned level) const
	if (seq.size() > 1) {

		exvector::const_iterator it=seq.begin() + 1, itend = seq.end();

		if (is_of_type(c, print_latex)) {

			// TeX output: group by variance
			bool covariant = true;

			while (it != itend) {
				// non-varidx indices are treated as covariant
				bool cur_covariant = (is_ex_of_type(*it, varidx) ? ex_to<varidx>(*it).is_covariant() : true);
				if (first || cur_covariant != covariant) { // Variance changed
					// The empty {} prevents indices from ending up on top of each other
					covariant = cur_covariant;

		// Non-TeX output: print each index in turn
		while (it != itend) {
360 /** Check whether all indices are of class idx and validate the symmetry
361 * tree. This function is used internally to make sure that all constructed
362 * indexed objects really carry indices and not some other classes. */
363 void indexed::validate(void) const
365 GINAC_ASSERT(seq.size() > 0);
366 exvector::const_iterator it = seq.begin() + 1, itend = seq.end();
367 while (it != itend) {
368 if (!is_ex_of_type(*it, idx))
369 throw(std::invalid_argument("indices of indexed object must be of type idx"));
373 if (!symtree.is_zero()) {
374 if (!is_ex_exactly_of_type(symtree, symmetry))
375 throw(std::invalid_argument("symmetry of indexed object must be of type symmetry"));
376 const_cast<symmetry &>(ex_to<symmetry>(symtree)).validate(seq.size() - 1);
380 /** Implementation of ex::diff() for an indexed object always returns 0.
383 ex indexed::derivative(const symbol & s) const
392 /** Check whether two sorted index vectors are consistent (i.e. equal). */
393 static bool indices_consistent(const exvector & v1, const exvector & v2)
395 // Number of indices must be the same
396 if (v1.size() != v2.size())
399 return equal(v1.begin(), v1.end(), v2.begin(), ex_is_equal());
402 exvector indexed::get_indices(void) const
404 GINAC_ASSERT(seq.size() >= 1);
405 return exvector(seq.begin() + 1, seq.end());
408 exvector indexed::get_dummy_indices(void) const
410 exvector free_indices, dummy_indices;
411 find_free_and_dummy(seq.begin() + 1, seq.end(), free_indices, dummy_indices);
412 return dummy_indices;
415 exvector indexed::get_dummy_indices(const indexed & other) const
417 exvector indices = get_free_indices();
418 exvector other_indices = other.get_free_indices();
419 indices.insert(indices.end(), other_indices.begin(), other_indices.end());
420 exvector dummy_indices;
421 find_dummy_indices(indices, dummy_indices);
422 return dummy_indices;
425 bool indexed::has_dummy_index_for(const ex & i) const
427 exvector::const_iterator it = seq.begin() + 1, itend = seq.end();
428 while (it != itend) {
429 if (is_dummy_pair(*it, i))
436 exvector indexed::get_free_indices(void) const
438 exvector free_indices, dummy_indices;
439 find_free_and_dummy(seq.begin() + 1, seq.end(), free_indices, dummy_indices);
443 exvector add::get_free_indices(void) const
445 exvector free_indices;
446 for (unsigned i=0; i<nops(); i++) {
448 free_indices = op(i).get_free_indices();
450 exvector free_indices_of_term = op(i).get_free_indices();
451 if (!indices_consistent(free_indices, free_indices_of_term))
452 throw (std::runtime_error("add::get_free_indices: inconsistent indices in sum"));
458 exvector mul::get_free_indices(void) const
460 // Concatenate free indices of all factors
462 for (unsigned i=0; i<nops(); i++) {
463 exvector free_indices_of_factor = op(i).get_free_indices();
464 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
467 // And remove the dummy indices
468 exvector free_indices, dummy_indices;
469 find_free_and_dummy(un, free_indices, dummy_indices);
473 exvector ncmul::get_free_indices(void) const
475 // Concatenate free indices of all factors
477 for (unsigned i=0; i<nops(); i++) {
478 exvector free_indices_of_factor = op(i).get_free_indices();
479 un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
482 // And remove the dummy indices
483 exvector free_indices, dummy_indices;
484 find_free_and_dummy(un, free_indices, dummy_indices);
488 exvector power::get_free_indices(void) const
490 // Return free indices of basis
491 return basis.get_free_indices();
/** Rename dummy indices in an expression.
 *
 *  @param e Expression to be worked on
 *  @param local_dummy_indices The set of dummy indices that appear in the
 *    expression "e"
 *  @param global_dummy_indices The set of dummy indices that have appeared
 *    before and which we would like to use in "e", too. This gets updated
 *    along the way
 *
 *  NOTE(review): this extraction is missing the early "nothing to do"
 *  returns, the loop increments/closing braces of the merge loop, and the
 *  trailing "return e;" statements after the two early-out conditions —
 *  restore from the upstream file before compiling. */
static ex rename_dummy_indices(const ex & e, exvector & global_dummy_indices, exvector & local_dummy_indices)
	unsigned global_size = global_dummy_indices.size(),
	         local_size = local_dummy_indices.size();

	// Any local dummy indices at all?

	if (global_size < local_size) {

		// More local indices than we encountered before, add the new ones
		int old_global_size = global_size;
		int remaining = local_size - global_size;
		exvector::const_iterator it = local_dummy_indices.begin(), itend = local_dummy_indices.end();
		while (it != itend && remaining > 0) {
			// Only add indices not already in the global set
			if (find_if(global_dummy_indices.begin(), global_dummy_indices.end(), bind2nd(ex_is_equal(), *it)) == global_dummy_indices.end()) {
				global_dummy_indices.push_back(*it);

		// If this is the first set of local indices, do nothing
		if (old_global_size == 0)

	GINAC_ASSERT(local_size <= global_size);

	// Construct lists of index symbols
	exlist local_syms, global_syms;
	for (unsigned i=0; i<local_size; i++)
		local_syms.push_back(local_dummy_indices[i].op(0));
	shaker_sort(local_syms.begin(), local_syms.end(), ex_is_less(), ex_swap());
	for (unsigned i=0; i<global_size; i++)
		global_syms.push_back(global_dummy_indices[i].op(0));
	shaker_sort(global_syms.begin(), global_syms.end(), ex_is_less(), ex_swap());

	// Remove common indices
	exlist local_uniq, global_uniq;
	set_difference(local_syms.begin(), local_syms.end(), global_syms.begin(), global_syms.end(), std::back_insert_iterator<exlist>(local_uniq), ex_is_less());
	set_difference(global_syms.begin(), global_syms.end(), local_syms.begin(), local_syms.end(), std::back_insert_iterator<exlist>(global_uniq), ex_is_less());

	// Replace remaining non-common local index symbols by global ones
	if (local_uniq.empty())

	// Trim the global list to the same length as the local one, then
	// perform the substitution
	while (global_uniq.size() > local_uniq.size())
		global_uniq.pop_back();
	return e.subs(lst(local_uniq), lst(global_uniq));
557 /* Ordering that only compares the base expressions of indexed objects. */
558 struct ex_base_is_less : public std::binary_function<ex, ex, bool> {
559 bool operator() (const ex &lh, const ex &rh) const
561 return (is_a<indexed>(lh) ? lh.op(0) : lh).compare(is_a<indexed>(rh) ? rh.op(0) : rh) < 0;
/** Simplify product of indexed expressions (commutative, noncommutative and
 *  simple squares), return list of free indices.
 *
 *  NOTE(review): this extraction is missing a substantial number of lines:
 *  the declarations of the locals 'v', 'f', 'un', 'new_it1', 'r' and
 *  'dummy_syms', the "contraction_done:" label, several 'continue'
 *  statements after the early-out conditions, the loop increments and a
 *  large number of braces, and the final "return r;".  Restore from the
 *  upstream file before compiling. */
ex simplify_indexed_product(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
	// Remember whether the product was commutative or noncommutative
	// (because we chop it into factors and need to reassemble later)
	bool non_commutative = is_ex_exactly_of_type(e, ncmul);

	// Collect factors in an exvector, store squares twice
	v.reserve(e.nops() * 2);

	if (is_ex_exactly_of_type(e, power)) {
		// We only get called for simple squares, split a^2 -> a*a
		GINAC_ASSERT(e.op(1).is_equal(_ex2));
		v.push_back(e.op(0));
		v.push_back(e.op(0));
	// Otherwise walk the factors of the product
	for (unsigned i=0; i<e.nops(); i++) {
		if (is_ex_exactly_of_type(f, power) && f.op(1).is_equal(_ex2)) {
			v.push_back(f.op(0));
			v.push_back(f.op(0));
		} else if (is_ex_exactly_of_type(f, ncmul)) {
			// Noncommutative factor found, split it as well
			non_commutative = true; // everything becomes noncommutative, ncmul will sort out the commutative factors later
			for (unsigned j=0; j<f.nops(); j++)
				v.push_back(f.op(j));

	// Perform contractions
	bool something_changed = false;
	GINAC_ASSERT(v.size() > 1);
	exvector::iterator it1, itend = v.end(), next_to_last = itend - 1;
	for (it1 = v.begin(); it1 != next_to_last; it1++) {

		// Only indexed objects can take part in a contraction
		if (!is_ex_of_type(*it1, indexed))

		bool first_noncommutative = (it1->return_type() != return_types::commutative);

		// Indexed factor found, get free indices and look for contraction
		// candidates
		exvector free1, dummy1;
		find_free_and_dummy(ex_to<indexed>(*it1).seq.begin() + 1, ex_to<indexed>(*it1).seq.end(), free1, dummy1);

		exvector::iterator it2;
		for (it2 = it1 + 1; it2 != itend; it2++) {

			if (!is_ex_of_type(*it2, indexed))

			bool second_noncommutative = (it2->return_type() != return_types::commutative);

			// Find free indices of second factor and merge them with free
			// indices of first factor
			find_free_and_dummy(ex_to<indexed>(*it2).seq.begin() + 1, ex_to<indexed>(*it2).seq.end(), un, dummy1);
			un.insert(un.end(), free1.begin(), free1.end());

			// Check whether the two factors share dummy indices
			exvector free, dummy;
			find_free_and_dummy(un, free, dummy);
			unsigned num_dummies = dummy.size();
			if (num_dummies == 0)

			// At least one dummy index, is it a defined scalar product?
			bool contracted = false;
			if (sp.is_defined(*it1, *it2)) {
				// Yes: replace the pair by the defined value
				*it1 = sp.evaluate(*it1, *it2);
				goto contraction_done;

			// Try to contract the first one with the second one
			contracted = ex_to<basic>(it1->op(0)).contract_with(it1, it2, v);

				// That didn't work; maybe the second object knows how to
				// contract itself with the first one
				contracted = ex_to<basic>(it2->op(0)).contract_with(it2, it1, v);

				if (first_noncommutative || second_noncommutative
				 || is_ex_exactly_of_type(*it1, add) || is_ex_exactly_of_type(*it2, add)
				 || is_ex_exactly_of_type(*it1, mul) || is_ex_exactly_of_type(*it2, mul)
				 || is_ex_exactly_of_type(*it1, ncmul) || is_ex_exactly_of_type(*it2, ncmul)) {

					// One of the factors became a sum or product:
					// re-expand expression and run again
					// Non-commutative products are always re-expanded to give
					// simplify_ncmul() the chance to re-order and canonicalize
					ex r = (non_commutative ? ex(ncmul(v, true)) : ex(mul(v)));
					return simplify_indexed(r, free_indices, dummy_indices, sp);

				// Both objects may have new indices now or they might
				// even not be indexed objects any more, so we have to
				something_changed = true;

	// Find free indices (concatenate them all and call find_free_and_dummy())
	// and all dummy indices that appear
	exvector un, individual_dummy_indices;
	for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
		exvector free_indices_of_factor;
		if (is_ex_of_type(*it1, indexed)) {
			exvector dummy_indices_of_factor;
			find_free_and_dummy(ex_to<indexed>(*it1).seq.begin() + 1, ex_to<indexed>(*it1).seq.end(), free_indices_of_factor, dummy_indices_of_factor);
			individual_dummy_indices.insert(individual_dummy_indices.end(), dummy_indices_of_factor.begin(), dummy_indices_of_factor.end());
			// Non-indexed factors may still carry indices (e.g. sums)
			free_indices_of_factor = it1->get_free_indices();
		un.insert(un.end(), free_indices_of_factor.begin(), free_indices_of_factor.end());
	exvector local_dummy_indices;
	find_free_and_dummy(un, free_indices, local_dummy_indices);
	local_dummy_indices.insert(local_dummy_indices.end(), individual_dummy_indices.begin(), individual_dummy_indices.end());

	// Filter out the dummy indices with variance
	exvector variant_dummy_indices;
	for (it1 = local_dummy_indices.begin(), itend = local_dummy_indices.end(); it1 != itend; ++it1) {
		if (is_exactly_a<varidx>(*it1))
			variant_dummy_indices.push_back(*it1);

	// Any indices with variance present at all?
	if (!variant_dummy_indices.empty()) {

		// Yes, bring the product into a canonical order that only depends on
		// the base expressions of indexed objects
		if (!non_commutative)
			std::sort(v.begin(), v.end(), ex_base_is_less());

		exvector moved_indices;

		// Iterate over all indexed objects in the product
		for (it1 = v.begin(), itend = v.end(); it1 != itend; ++it1) {
			if (!is_ex_of_type(*it1, indexed))

			bool it1_dirty = false; // It this is true, then new_it1 holds a new value for *it1

			// If a dummy index is encountered for the first time in the
			// product, pull it up, otherwise, pull it down
			exvector::iterator it2, it2end;
			for (it2 = const_cast<indexed &>(ex_to<indexed>(*it1)).seq.begin(), it2end = const_cast<indexed &>(ex_to<indexed>(*it1)).seq.end(); it2 != it2end; ++it2) {
				if (!is_exactly_a<varidx>(*it2))

				// First occurrence: toggle covariant indices to
				// contravariant and remember the index as "moved"
				exvector::iterator vit, vitend;
				for (vit = variant_dummy_indices.begin(), vitend = variant_dummy_indices.end(); vit != vitend; ++vit) {
					if (it2->op(0).is_equal(vit->op(0))) {
						if (ex_to<varidx>(*it2).is_covariant()) {
							new_it1 = (it1_dirty ? new_it1 : *it1).subs(*it2 == ex_to<varidx>(*it2).toggle_variance());
							something_changed = true;
						moved_indices.push_back(*vit);
						variant_dummy_indices.erase(vit);

				// Later occurrence of a moved index: toggle contravariant
				// indices to covariant
				for (vit = moved_indices.begin(), vitend = moved_indices.end(); vit != vitend; ++vit) {
					if (it2->op(0).is_equal(vit->op(0))) {
						if (ex_to<varidx>(*it2).is_contravariant()) {
							new_it1 = (it1_dirty ? new_it1 : *it1).subs(*it2 == ex_to<varidx>(*it2).toggle_variance());
							something_changed = true;

	// Reassemble the (possibly modified) product
	if (something_changed)
		r = non_commutative ? ex(ncmul(v, true)) : ex(mul(v));

	// The result should be symmetric with respect to exchange of dummy
	// indices, so if the symmetrization vanishes, the whole expression is
	// zero. This detects things like eps.i.j.k * p.j * p.k = 0.
	if (local_dummy_indices.size() >= 2) {
		for (int i=0; i<local_dummy_indices.size(); i++)
			dummy_syms.append(local_dummy_indices[i].op(0));
		if (r.symmetrize(dummy_syms).is_zero()) {
			free_indices.clear();

	// Dummy index renaming
	r = rename_dummy_indices(r, dummy_indices, local_dummy_indices);

	// Product of indexed object with a scalar?
	if (is_ex_exactly_of_type(r, mul) && r.nops() == 2
	 && is_ex_exactly_of_type(r.op(1), numeric) && is_ex_of_type(r.op(0), indexed))
		return ex_to<basic>(r.op(0).op(0)).scalar_mul_indexed(r.op(0), ex_to<numeric>(r.op(1)));
/** Simplify indexed expression, return list of free indices.
 *
 *  NOTE(review): this extraction is missing the opening brace, the
 *  declaration of the accumulator 'sum', the first-term/else split inside
 *  the sum loop, the plain "sum += term" fallback, the "return sum;" for
 *  the add case and the final "return e_expanded;" — restore from the
 *  upstream file before compiling. */
ex simplify_indexed(const ex & e, exvector & free_indices, exvector & dummy_indices, const scalar_products & sp)
	// Expand the expression
	ex e_expanded = e.expand();

	// Simplification of single indexed object: just find the free indices
	// and perform dummy index renaming
	if (is_ex_of_type(e_expanded, indexed)) {
		const indexed &i = ex_to<indexed>(e_expanded);
		exvector local_dummy_indices;
		find_free_and_dummy(i.seq.begin() + 1, i.seq.end(), free_indices, local_dummy_indices);
		return rename_dummy_indices(e_expanded, dummy_indices, local_dummy_indices);

	// Simplification of sum = sum of simplifications, check consistency of
	// free indices in each term
	if (is_ex_exactly_of_type(e_expanded, add)) {
		free_indices.clear();

		for (unsigned i=0; i<e_expanded.nops(); i++) {
			exvector free_indices_of_term;
			ex term = simplify_indexed(e_expanded.op(i), free_indices_of_term, dummy_indices, sp);
			if (!term.is_zero()) {
				// First non-zero term defines the reference set of free
				// indices; later terms must agree with it
				free_indices = free_indices_of_term;
				if (!indices_consistent(free_indices, free_indices_of_term))
					throw (std::runtime_error("simplify_indexed: inconsistent indices in sum"));
				// Let the base object's class combine two indexed terms
				if (is_ex_of_type(sum, indexed) && is_ex_of_type(term, indexed))
					sum = ex_to<basic>(sum.op(0)).add_indexed(sum, term);

	// Simplification of products
	if (is_ex_exactly_of_type(e_expanded, mul)
	 || is_ex_exactly_of_type(e_expanded, ncmul)
	 || (is_ex_exactly_of_type(e_expanded, power) && is_ex_of_type(e_expanded.op(0), indexed) && e_expanded.op(1).is_equal(_ex2)))
		return simplify_indexed_product(e_expanded, free_indices, dummy_indices, sp);

	// Cannot do anything
	free_indices.clear();
846 /** Simplify/canonicalize expression containing indexed objects. This
847 * performs contraction of dummy indices where possible and checks whether
848 * the free indices in sums are consistent.
850 * @return simplified expression */
851 ex ex::simplify_indexed(void) const
853 exvector free_indices, dummy_indices;
855 return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
858 /** Simplify/canonicalize expression containing indexed objects. This
859 * performs contraction of dummy indices where possible, checks whether
860 * the free indices in sums are consistent, and automatically replaces
861 * scalar products by known values if desired.
863 * @param sp Scalar products to be replaced automatically
864 * @return simplified expression */
865 ex ex::simplify_indexed(const scalar_products & sp) const
867 exvector free_indices, dummy_indices;
868 return GiNaC::simplify_indexed(*this, free_indices, dummy_indices, sp);
871 /** Symmetrize expression over its free indices. */
872 ex ex::symmetrize(void) const
874 return GiNaC::symmetrize(*this, get_free_indices());
877 /** Antisymmetrize expression over its free indices. */
878 ex ex::antisymmetrize(void) const
880 return GiNaC::antisymmetrize(*this, get_free_indices());
883 /** Symmetrize expression by cyclic permutation over its free indices. */
884 ex ex::symmetrize_cyclic(void) const
886 return GiNaC::symmetrize_cyclic(*this, get_free_indices());
893 void scalar_products::add(const ex & v1, const ex & v2, const ex & sp)
895 spm[make_key(v1, v2)] = sp;
// Register the products of all pairs of vectors from the list l.
// NOTE(review): the opening brace, the inner-loop body that fetches the
// two list elements and stores their product, and the closing braces are
// missing from this extraction — restore from upstream.
void scalar_products::add_vectors(const lst & l)
	// Add all possible pairs of products
	unsigned num = l.nops();
	for (unsigned i=0; i<num; i++) {
		for (unsigned j=0; j<num; j++) {
911 void scalar_products::clear(void)
916 /** Check whether scalar product pair is defined. */
917 bool scalar_products::is_defined(const ex & v1, const ex & v2) const
919 return spm.find(make_key(v1, v2)) != spm.end();
922 /** Return value of defined scalar product pair. */
923 ex scalar_products::evaluate(const ex & v1, const ex & v2) const
925 return spm.find(make_key(v1, v2))->second;
928 void scalar_products::debugprint(void) const
930 std::cerr << "map size=" << spm.size() << std::endl;
931 spmap::const_iterator i = spm.begin(), end = spm.end();
933 const spmapkey & k = i->first;
934 std::cerr << "item key=(" << k.first << "," << k.second;
935 std::cerr << "), value=" << i->second << std::endl;
940 /** Make key from object pair. */
941 spmapkey scalar_products::make_key(const ex & v1, const ex & v2)
943 // If indexed, extract base objects
944 ex s1 = is_ex_of_type(v1, indexed) ? v1.op(0) : v1;
945 ex s2 = is_ex_of_type(v2, indexed) ? v2.op(0) : v2;
947 // Enforce canonical order in pair
948 if (s1.compare(s2) > 0)
949 return spmapkey(s2, s1);
951 return spmapkey(s1, s2);